Dataset columns: repo_name (string, 6-130 characters), hexsha (list), file_path (list), code (list), apis (list)
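Each record below pairs a repository with the paths of the sampled files, the raw file contents, and the library APIs detected in each file. As a minimal sketch of how such records could be consumed (assuming a line-delimited JSON export using the column names above; the file name records.jsonl is hypothetical):

import json

# Hypothetical export: one JSON object per line, with the columns listed above.
with open("records.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "mabirck/dqn-EWC"
        # Each position aligns a file path with its source text and the APIs it references.
        for path, source, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(f"{repo}:{path} -> {len(apis)} tracked APIs, "
                  f"{len(source.splitlines())} source lines")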
mabirck/dqn-EWC
[ "ed97ad9b382e93e9c9fbfc8d7a2772cf8de992ff" ]
[ "utils.py" ]
[ "import os\nimport csv\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable as V\nimport torch.nn.functional as F\nfrom myDataLoader import getDataLoader\n\ndef saveLog(test_loss, test_acc, correct, dropout, args, epoch, test_task, continual=False):\n #print(test_task, continual)\n path = './log/'\n #path += \"_\".join([args.arc, str(args.epochs), args.filter_reg, str(args.phi), 'seed', str(args.seed), 'depth', str(args.depth), args.intra_extra])\n path+= dropout+'_MNIST_TASK_'+str(test_task)+'_'+str(args.seed)\n path = path+'.csv'\n if epoch == 0 and os.path.isfile(path) and not continual: os.remove(path)\n assert not(os.path.isfile(path) == True and epoch ==0 and not continual), \"That can't be right. This file should not be here!!!!\"\n fields = ['epoch', epoch, 'test_loss', test_loss, 'test_acc', test_acc, 'correct', correct]\n with open(path, 'a+') as f:\n writer = csv.writer(f)\n writer.writerow(fields)\n\ndef to_one_hot(y, n_dims=None):\n \"\"\" Take integer y (tensor or V) with n dims and convert it to 1-hot representation with n+1 dims. \"\"\"\n y_tensor = y.data if isinstance(y, V) else y\n y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1\n y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)\n y_one_hot = y_one_hot.view(*y.shape, -1)\n return V(y_one_hot) if isinstance(y, V) else y_one_hot\n\n\ndef test(model, epoch, test_loader, test_task, args, continuous):\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n data, target = V(data, volatile=True), V(target).long()\n #print(target)\n output = model(data)\n\n test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n #print(correct)\n\n test_loss /= len(test_loader.dataset)\n test_acc = 100. * correct / len(test_loader.dataset)\n print('\\nTest set task {}: Average loss: {:.4f}, Accuracy: {}/{} ({:.5f}%)\\n'.format(\n test_task, test_loss, correct, len(test_loader.dataset),\n test_acc))\n if args.dropout == True:\n drop_way = \"Dropout\"\n elif args.ewc == True:\n drop_way = \"EWC\"\n else:\n drop_way = \"None\"\n saveLog(test_loss, test_acc, correct, drop_way, args, epoch, test_task, continuous)\n\n\ndef test_it(model, epoch, test_datasets, args, task):\n # EVALUATING MULTIPLE TASKS #\n # task != test_task is to evaluate if is a continual #\n # run or first one #\n [\n test(model, epoch, test_dataset, test_task, args, task != test_task)\n if test_task <= task else None\n for test_task, test_dataset in enumerate(test_datasets)\n ]\n\ndef get_datasets(permutations, args):\n train_datasets = [\n getDataLoader(train=True, permutation=p, args=args) for p in permutations\n ]\n\n test_datasets = [\n getDataLoader(train=False, permutation=p, args=args) for p in permutations\n ]\n\n return train_datasets, test_datasets\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.max", "torch.cuda.is_available", "torch.autograd.Variable" ] ]
eddyfortier/fortier_project
[ "586e35b5c417b4ae454ee4fe40755112b29b97a0" ]
[ "code/other_functions.py" ]
[ "import numpy as np\nimport plotly.graph_objects as go\n\n\nif __name__ == \"__main__\":\n print(\"This script is not designed to be used by itself.\",\n \"Please use the graph_generator.py script instead\")\n\n\nelse:\n def retrieve_spreadsheet():\n \"\"\"\n INPUTS\n -none\n OUTPUTS\n -returns the corrected URL to retrieve the spreadsheet in a .csv format\n \"\"\"\n\n # url_share = input(\"Please input the URL of the Google \"\\\n # \"Spreadsheet data file: \")\n url_share = \"https://docs.google.com/spreadsheets/d/1UQsU6FNr7ovVjL\"\\\n \"RIMItgtYWr1zN7UHpMjfHtdGa1myc/edit#gid=0\"\n url_csv = url_share.replace(\"/edit#gid=\", \"/export?format=csv&gid=\")\n\n return url_csv\n\n def eliminate_row(df, column_to_search, value_to_search):\n \"\"\"\n INPUTS\n -df: pandas dataframe to modify\n -column_to_search: takes a column name where to search for a specific\n value\n -value_to_search: takes a value to search in the specified column\n OUPUT\n -returns a dataframe where the unnecessary rows have been eliminated\n \"\"\"\n\n to_drop = []\n\n for i in range(0, len(df)):\n if df[column_to_search][i] == value_to_search:\n to_drop.append(i)\n else:\n continue\n\n df = df.drop(to_drop, axis=0)\n df = df.reset_index(drop=True)\n\n return df\n\n def extract_subject(df, subject):\n \"\"\"\n INPUTS\n -df: dataframe containing all the tests\n -subject: ID of the participant to isolate\n OUTPUTS\n -returns a dataframe containing only the sessions linked to the ID\n provided\n \"\"\"\n\n mask = df[\"Participant_ID\"] == subject\n df_sub = df[mask].reset_index(drop=True)\n\n return df_sub\n\n def extract_language(df, run_ID):\n \"\"\"\n INPUTS\n -df: one-line dataframe containing the tests\n -run_ID: specifies if it is the first or second language\n OUTPUTS\n -returns the name of the language linked to the run_ID\n \"\"\"\n\n row = df.index[0]\n\n if run_ID == \"L1\":\n language = df[\"MTX1_LANG\"][row]\n elif run_ID == \"L2\":\n language = df[\"MTX2_LANG\"][row]\n\n return language\n\n def save_graph_PTA(graph, df, ear):\n \"\"\"\n INPUTS\n -graph: interactive plotly.graph_objects figure\n -df: dataframe with the informations that were used\n to generate the graph\n -ear: ear side linked with the graph\n OUTPUTS\n -saves the graph in a .html file to a subject's specific\n location in the repository\n \"\"\"\n\n test = \"PTA\"\n\n row = df.index[0]\n\n sub_long = df[\"Participant_ID\"][row]\n sub_short = sub_long.lstrip(\"Sub\")\n\n folder = \"../results/\" + sub_long + \"/\"\n path_header = folder + \"Sub-\" + sub_short + \"_\" + test + \"_\"\n\n if ear == \"All_runs\":\n path = path_header + ear + \".html\"\n\n else:\n session = df[\"DATE\"][row]\n name = df[\"Protocol name\"][row]\n condition = df[\"Protocol condition\"][row]\n\n path = (path_header + session + \"_\" + name + \": \" + condition\n + \" (\" + ear + \")\" + \".html\")\n\n graph.write_html(path)\n\n return True\n\n def save_graph_MTX(graph, df, language_ID):\n \"\"\"\n INPUTS\n -graph: interactive plotly.graph_objects figure\n -df: dataframe with the informations that were used\n to generate the graph\n -language_ID: specifies if it is the L1 or L2 and which\n language was used\n OUTPUTS\n -saves the graph in a .html file to a subject's specific location\n in the repository\n \"\"\"\n\n test = \"MTX\"\n\n row = df.index[0]\n\n sub_long = df[\"Participant_ID\"][row]\n sub_short = sub_long.lstrip(\"Sub\")\n\n folder = \"../results/\" + sub_long + \"/\"\n path_header = folder + \"Sub-\" + sub_short + \"_\" + test + \"_\"\n\n if 
language_ID.endswith(\"_All_runs\") is True:\n path = path_header + language_ID + \".html\"\n\n else:\n session = df[\"DATE\"][row]\n name = df[\"Protocol name\"][row]\n condition = df[\"Protocol condition\"][row]\n\n path = (path_header + session + \"_\" + name + \": \" + condition\n + \" (\" + language_ID + \")\" + \".html\")\n\n graph.write_html(path)\n\n return True\n\n def return_130(df, to_search):\n \"\"\"\n INPUTS\n -df: takes a one row dataframe to scan for the value 130 (PTA's\n \"no response\" marker)\n -to_search: list of column names in which the fct has to look in\n OUTPUTS\n -returns a list of the columns containing the value 130\n \"\"\"\n\n index_value = df.index[0]\n to_drop = []\n\n for i in to_search:\n if df[i][index_value] == 130:\n to_drop.append(to_search.index(i))\n else:\n continue\n\n return to_drop\n\n def generate_title_graph(df, test):\n \"\"\"\n INPUTS\n -df: dataframe with the informations to generate the title for\n a multiple runs graph\n OUTPUTS\n -returns a string to use as a title for the graph to generate\n \"\"\"\n\n row = df.index[0]\n ID = df[\"Participant_ID\"][row]\n\n if test == \"PTA\":\n title = ID + \" - \" + \"Pure-Tone Audiometry\"\n elif test == \"MTX\":\n title = ID + \" - \" + \"Matrix Speech-in-Noise Perception Test\"\n else:\n print(\"ERROR: The test parameter passed to the\",\n \"generate_title_graph function is not valid.\")\n\n return title\n\n def generate_title_run_PTA(df, ear, index):\n \"\"\"\n INPUTS\n -df: dataframe with the informations to generate the title\n for a single run\n -ear: ear side linked with the title\n OUTPUTS\n -returns a string to use as a title for the graph to generate\n \"\"\"\n\n ID = df[\"Participant_ID\"][index]\n name = df[\"Protocol name\"][index]\n condition = df[\"Protocol condition\"][index]\n\n title = (ID + \" - \" + \"PTA, \" + name + \": \" + condition\n + \" (\" + ear + \")\")\n\n return title\n\n def generate_title_run_MTX(df, run_ID, index):\n \"\"\"\n INPUTS\n -df: dataframe with the informations to generate the title for\n a single run\n -run_ID: indicates if it is the first or second language\n OUTPUTS\n -returns a string to use as a title for the graph to generate\n \"\"\"\n\n ID = df[\"Participant_ID\"][index]\n name = df[\"Protocol name\"][index]\n condition = df[\"Protocol condition\"][index]\n language = run_ID + \": \" + extract_language(df.loc[[index]], run_ID)\n\n title = (ID + \" - \" + \"Matrix Test, \" + name + \": \" + condition + \" (\"\n + language + \")\")\n\n return title\n\n def data_to_plot_PTA(df, prefix):\n \"\"\"\n INPUTS\n -df: one line dataframe from which this function extract the\n data to plot\n -prefix: str marker pointing to the columns containing the\n relevant data\n OUTPUTS\n -returns two lists of data containing the x and y values to plot\n \"\"\"\n\n column_names = df.columns\n row = df.index[0]\n to_search = []\n x = []\n y = []\n\n for i in column_names:\n if i.startswith(prefix):\n to_search.append(i)\n else:\n continue\n\n list_130 = return_130(df, to_search)\n\n for j in list_130:\n to_remove = to_search[j]\n df = df.drop(to_remove, axis=1)\n\n column_names = df.columns\n\n for k in column_names:\n if k.startswith(prefix):\n x.append(int(k.lstrip(prefix)))\n y.append(df[k][row])\n else:\n continue\n\n return x, y\n\n def data_to_plot_MTX(df, prefix):\n \"\"\"\n INPUTS\n -df: one line dataframe from which this function extracts the\n data to plot\n -prefix: str marker pointing to the columns containing the\n relevant data\n OUTPUTS\n -returns two lists 
of data containing the x and y values to plot\n \"\"\"\n\n column_names = df.columns\n row = df.index[0]\n x = [\"Noise: Left<br>Speech: Left\",\n \"Noise: Binaural<br>Speech: Left\",\n \"Noise: Binaural<br>Speech: Binaural\",\n \"Noise: Binaural<br>Speech: Right\",\n \"Noise: Right<br>Speech: Right\"]\n y = []\n\n for i in column_names:\n if i.startswith(prefix):\n value = df[i][row].replace(\",\", \".\")\n y.append(float(value))\n else:\n continue\n\n return x, y\n\n def plot_pta_L(df):\n \"\"\"\n INPUTS\n -df: pandas dataframe containing the data to plot\n OUTPUTS\n -saves pta graphs in .html\n \"\"\"\n\n title = generate_title_run_PTA(df, \"Left Ear\", df.index[0])\n labels = {\"title\": title,\n \"x\": \"Frequency (Hz)\",\n \"y\": \"Hearing Threshold (dB HL)\"}\n\n fig = go.Figure()\n\n fig.update_layout(title=labels[\"title\"],\n xaxis_title=labels[\"x\"],\n yaxis_title=labels[\"y\"],\n xaxis_type=\"log\",\n xaxis_range=[np.log10(100), np.log10(20000)],\n yaxis_range=[80, -20],\n yaxis_dtick=10,\n xaxis_showline=True,\n xaxis_linecolor=\"black\",\n yaxis_showline=True,\n yaxis_linecolor=\"black\",\n yaxis_zeroline=True,\n yaxis_zerolinewidth=1,\n yaxis_zerolinecolor=\"black\")\n\n x, y = data_to_plot_PTA(df, \"LE_\")\n\n fig.add_trace(go.Scatter(x=x,\n y=y,\n line_color=\"blue\",\n mode='lines+markers',\n name=labels[\"title\"],\n hovertemplate=\"%{x:1.0f} Hz<br>\" +\n \"%{y:1.0f} dB HL\"))\n\n completed = save_graph_PTA(fig, df, \"Left Ear\")\n\n if completed is True:\n return True\n else:\n return False\n\n def plot_pta_R(df):\n \"\"\"\n INPUTS\n -df: pandas dataframe containing the data to plot\n OUTPUTS\n -saves pta graphs in .html\n \"\"\"\n\n title = generate_title_run_PTA(df, \"Right Ear\", df.index[0])\n labels = {\"title\": title,\n \"x\": \"Frequency (Hz)\",\n \"y\": \"Hearing Threshold (dB HL)\"}\n\n fig = go.Figure()\n\n fig.update_layout(title=labels[\"title\"],\n xaxis_title=labels[\"x\"],\n yaxis_title=labels[\"y\"],\n xaxis_type=\"log\",\n xaxis_range=[np.log10(100), np.log10(20000)],\n yaxis_range=[80, -20],\n yaxis_dtick=10,\n xaxis_showline=True,\n xaxis_linecolor=\"black\",\n yaxis_showline=True,\n yaxis_linecolor=\"black\",\n yaxis_zeroline=True,\n yaxis_zerolinewidth=1,\n yaxis_zerolinecolor=\"black\")\n\n x, y = data_to_plot_PTA(df, \"RE_\")\n\n fig.add_trace(go.Scatter(x=x,\n y=y,\n line_color=\"red\",\n mode='lines+markers',\n name=labels[\"title\"],\n hovertemplate=\"%{x:1.0f} Hz<br>\" +\n \"%{y:1.0f} dB HL\"))\n\n completed = save_graph_PTA(fig, df, \"Right Ear\")\n\n if completed is True:\n return True\n else:\n return False\n\n def plot_pta_subject(df, display=False):\n \"\"\"\n INPUTS\n -df: pandas dataframe containing the data to plot\n OUTPUTS\n -saves pta graph in .html\n \"\"\"\n\n title_graph = generate_title_graph(df, \"PTA\")\n labels = {\"title\": title_graph,\n \"x\": \"Frequency (Hz)\",\n \"y\": \"Hearing Threshold (dB HL)\"}\n\n fig = go.Figure()\n\n fig.update_layout(title=labels[\"title\"],\n xaxis_title=labels[\"x\"],\n yaxis_title=labels[\"y\"],\n xaxis_type=\"log\",\n xaxis_range=[np.log10(100), np.log10(20000)],\n yaxis_range=[80, -20],\n yaxis_dtick=10,\n xaxis_showline=True,\n xaxis_linecolor=\"black\",\n yaxis_showline=True,\n yaxis_linecolor=\"black\",\n yaxis_zeroline=True,\n yaxis_zerolinewidth=1,\n yaxis_zerolinecolor=\"black\")\n\n for i in range(0, len(df)):\n run_R_x, run_R_y = data_to_plot_PTA(df.loc[[i]], \"RE_\")\n run_L_x, run_L_y = data_to_plot_PTA(df.loc[[i]], \"LE_\")\n\n title_run_R = generate_title_run_PTA(df, \"Right 
Ear\", i)\n title_run_L = generate_title_run_PTA(df, \"Left Ear\", i)\n\n fig.add_trace(go.Scatter(x=run_R_x,\n y=run_R_y,\n line_color=\"red\",\n mode='lines+markers',\n name=title_run_R,\n hovertemplate=\"%{x:1.0f} Hz<br>\" +\n \"%{y:1.0f} dB HL\"))\n\n fig.add_trace(go.Scatter(x=run_L_x,\n y=run_L_y,\n line_color=\"blue\",\n mode='lines+markers',\n name=title_run_L,\n hovertemplate=\"%{x:1.0f} Hz<br>\" +\n \"%{y:1.0f} dB HL\"))\n\n if display is True:\n fig.show()\n else:\n completed = save_graph_PTA(fig, df, \"All_runs\")\n\n if completed is True:\n return True\n else:\n return False\n\n def plot_mtx(df, run_ID):\n \"\"\"\n INPUTS\n -df: pandas dataframe containing the data to plot\n -run_ID: specifies if it is the first or second language\n OUTPUTS\n -saves mtx graphs in .html\n \"\"\"\n if run_ID == \"L1\":\n prefix = \"MTX1_\"\n elif run_ID == \"L2\":\n prefix = \"MTX2_\"\n\n language = extract_language(df, run_ID)\n language_ID = run_ID + \": \" + language\n no_lang_df = df.drop(labels=f\"{prefix}LANG\", axis=1)\n title = generate_title_run_MTX(df, run_ID, df.index[0])\n labels = {\"title\": title,\n \"x\": \"Test Condition\",\n \"y\": \"50% Comprehension Threshold (dB)\"}\n\n fig = go.Figure()\n\n fig.update_layout(title=labels[\"title\"],\n xaxis_title=labels[\"x\"],\n yaxis_title=labels[\"y\"],\n yaxis_range=[-15, 5],\n yaxis_dtick=5,\n xaxis_showline=True,\n xaxis_linecolor=\"black\",\n yaxis_showline=True,\n yaxis_linecolor=\"black\",\n yaxis_zeroline=True,\n yaxis_zerolinewidth=1,\n yaxis_zerolinecolor=\"black\")\n\n x, y = data_to_plot_MTX(no_lang_df, prefix)\n\n fig.add_trace(go.Scatter(x=x,\n y=y,\n mode='lines+markers',\n name=labels[\"title\"],\n hovertemplate=\"%{x}<br>\" +\n \"%{y:0.1f} dB\"))\n\n completed = save_graph_MTX(fig, df, language_ID)\n\n if completed is True:\n return True\n else:\n return False\n\n def plot_mtx_subject(df, run_ID, display=False):\n \"\"\"\n INPUTS\n -df: pandas dataframe containing the data to plot\n -run_ID: specifies if it is the first or second language\n OUTPUTS\n -saves mtx graphs in .html\n \"\"\"\n\n if run_ID == \"L1\":\n prefix = \"MTX1_\"\n elif run_ID == \"L2\":\n prefix = \"MTX2_\"\n\n language_title = extract_language(df, run_ID)\n language_ID_title = run_ID + \": \" + language_title\n language_ID_save = run_ID + \"_\" + language_title + \"_All_runs\"\n no_lang_df = df.drop(labels=f\"{prefix}LANG\", axis=1)\n title_graph = (generate_title_graph(df, \"MTX\") + \" (\"\n + language_ID_title + \")\")\n labels = {\"title\": title_graph,\n \"x\": \"Test Condition\",\n \"y\": \"50% Comprehension Threshold (dB)\"}\n\n fig = go.Figure()\n\n fig.update_layout(title=labels[\"title\"],\n xaxis_title=labels[\"x\"],\n yaxis_title=labels[\"y\"],\n yaxis_range=[-15, 5],\n yaxis_dtick=5,\n xaxis_showline=True,\n xaxis_linecolor=\"black\",\n yaxis_showline=True,\n yaxis_linecolor=\"black\",\n yaxis_zeroline=True,\n yaxis_zerolinewidth=1,\n yaxis_zerolinecolor=\"black\")\n\n for i in range(0, len(df)):\n x, y = data_to_plot_MTX(no_lang_df.loc[[i]], prefix)\n\n title_run = generate_title_run_MTX(df, run_ID, i)\n\n fig.add_trace(go.Scatter(x=x,\n y=y,\n mode='lines+markers',\n name=title_run,\n hovertemplate=\"%{x}<br>\" +\n \"%{y:0.1f} dB\"))\n\n if display is True:\n fig.show()\n else:\n completed = save_graph_MTX(fig, df, language_ID_save)\n\n if completed is True:\n return True\n else:\n return False\n" ]
[ [ "numpy.log10" ] ]
jhlee9010/espnet
[ "a3c7c50bccf84c1f40bf79d40bdd735f1b02d79f" ]
[ "espnet2/asr/espnet_model.py" ]
[ "from contextlib import contextmanager\nfrom distutils.version import LooseVersion\nimport logging\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport torch\nfrom typeguard import check_argument_types\n\nfrom espnet.nets.e2e_asr_common import ErrorCalculator\nfrom espnet.nets.pytorch_backend.nets_utils import th_accuracy\nfrom espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos\nfrom espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (\n LabelSmoothingLoss, # noqa: H301\n)\nfrom espnet2.asr.ctc import CTC\nfrom espnet2.asr.decoder.abs_decoder import AbsDecoder\nfrom espnet2.asr.encoder.abs_encoder import AbsEncoder\nfrom espnet2.asr.frontend.abs_frontend import AbsFrontend\nfrom espnet2.asr.postencoder.abs_postencoder import AbsPostEncoder\nfrom espnet2.asr.preencoder.abs_preencoder import AbsPreEncoder\nfrom espnet2.asr.specaug.abs_specaug import AbsSpecAug\nfrom espnet2.asr.transducer.error_calculator import ErrorCalculatorTransducer\nfrom espnet2.asr.transducer.utils import get_transducer_task_io\nfrom espnet2.layers.abs_normalize import AbsNormalize\nfrom espnet2.torch_utils.device_funcs import force_gatherable\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\n\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.6.0\"):\n from torch.cuda.amp import autocast\nelse:\n # Nothing to do if torch<1.6.0\n @contextmanager\n def autocast(enabled=True):\n yield\n\n\nclass ESPnetASRModel(AbsESPnetModel):\n \"\"\"CTC-attention hybrid Encoder-Decoder model\"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n token_list: Union[Tuple[str, ...], List[str]],\n frontend: Optional[AbsFrontend],\n specaug: Optional[AbsSpecAug],\n normalize: Optional[AbsNormalize],\n preencoder: Optional[AbsPreEncoder],\n encoder: AbsEncoder,\n postencoder: Optional[AbsPostEncoder],\n decoder: AbsDecoder,\n ctc: CTC,\n joint_network: Optional[torch.nn.Module],\n ctc_weight: float = 0.5,\n interctc_weight: float = 0.0,\n ignore_id: int = -1,\n lsm_weight: float = 0.0,\n length_normalized_loss: bool = False,\n report_cer: bool = True,\n report_wer: bool = True,\n sym_space: str = \"<space>\",\n sym_blank: str = \"<blank>\",\n extract_feats_in_collect_stats: bool = True,\n ):\n assert check_argument_types()\n assert 0.0 <= ctc_weight <= 1.0, ctc_weight\n assert 0.0 <= interctc_weight < 1.0, interctc_weight\n\n super().__init__()\n # note that eos is the same as sos (equivalent ID)\n self.blank_id = 0\n self.sos = vocab_size - 1\n self.eos = vocab_size - 1\n self.vocab_size = vocab_size\n self.ignore_id = ignore_id\n self.ctc_weight = ctc_weight\n self.interctc_weight = interctc_weight\n self.token_list = token_list.copy()\n\n self.frontend = frontend\n self.specaug = specaug\n self.normalize = normalize\n self.preencoder = preencoder\n self.postencoder = postencoder\n self.encoder = encoder\n\n if not hasattr(self.encoder, \"interctc_use_conditioning\"):\n self.encoder.interctc_use_conditioning = False\n if self.encoder.interctc_use_conditioning:\n self.encoder.conditioning_layer = torch.nn.Linear(\n vocab_size, self.encoder.output_size()\n )\n\n self.use_transducer_decoder = joint_network is not None\n\n self.error_calculator = None\n\n if self.use_transducer_decoder:\n from warprnnt_pytorch import RNNTLoss\n\n self.decoder = decoder\n self.joint_network = joint_network\n\n self.criterion_transducer = RNNTLoss(\n blank=self.blank_id,\n fastemit_lambda=0.0,\n )\n\n if report_cer or 
report_wer:\n self.error_calculator_trans = ErrorCalculatorTransducer(\n decoder,\n joint_network,\n token_list,\n sym_space,\n sym_blank,\n report_cer=report_cer,\n report_wer=report_wer,\n )\n else:\n self.error_calculator_trans = None\n\n if self.ctc_weight != 0:\n self.error_calculator = ErrorCalculator(\n token_list, sym_space, sym_blank, report_cer, report_wer\n )\n else:\n # we set self.decoder = None in the CTC mode since\n # self.decoder parameters were never used and PyTorch complained\n # and threw an Exception in the multi-GPU experiment.\n # thanks Jeff Farris for pointing out the issue.\n if ctc_weight == 1.0:\n self.decoder = None\n else:\n self.decoder = decoder\n\n self.criterion_att = LabelSmoothingLoss(\n size=vocab_size,\n padding_idx=ignore_id,\n smoothing=lsm_weight,\n normalize_length=length_normalized_loss,\n )\n\n if report_cer or report_wer:\n self.error_calculator = ErrorCalculator(\n token_list, sym_space, sym_blank, report_cer, report_wer\n )\n\n if ctc_weight == 0.0:\n self.ctc = None\n else:\n self.ctc = ctc\n\n self.extract_feats_in_collect_stats = extract_feats_in_collect_stats\n\n def forward(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n text: torch.Tensor,\n text_lengths: torch.Tensor,\n **kwargs,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:\n \"\"\"Frontend + Encoder + Decoder + Calc loss\n\n Args:\n speech: (Batch, Length, ...)\n speech_lengths: (Batch, )\n text: (Batch, Length)\n text_lengths: (Batch,)\n kwargs: \"utt_id\" is among the input.\n \"\"\"\n assert text_lengths.dim() == 1, text_lengths.shape\n # Check that batch_size is unified\n assert (\n speech.shape[0]\n == speech_lengths.shape[0]\n == text.shape[0]\n == text_lengths.shape[0]\n ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)\n batch_size = speech.shape[0]\n\n # for data-parallel\n text = text[:, : text_lengths.max()]\n\n # 1. Encoder\n encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)\n intermediate_outs = None\n if isinstance(encoder_out, tuple):\n intermediate_outs = encoder_out[1]\n encoder_out = encoder_out[0]\n\n loss_att, acc_att, cer_att, wer_att = None, None, None, None\n loss_ctc, cer_ctc = None, None\n loss_transducer, cer_transducer, wer_transducer = None, None, None\n stats = dict()\n\n # 1. CTC branch\n if self.ctc_weight != 0.0:\n loss_ctc, cer_ctc = self._calc_ctc_loss(\n encoder_out, encoder_out_lens, text, text_lengths\n )\n\n # Collect CTC branch stats\n stats[\"loss_ctc\"] = loss_ctc.detach() if loss_ctc is not None else None\n stats[\"cer_ctc\"] = cer_ctc\n\n # Intermediate CTC (optional)\n loss_interctc = 0.0\n if self.interctc_weight != 0.0 and intermediate_outs is not None:\n for layer_idx, intermediate_out in intermediate_outs:\n # we assume intermediate_out has the same length & padding\n # as those of encoder_out\n loss_ic, cer_ic = self._calc_ctc_loss(\n intermediate_out, encoder_out_lens, text, text_lengths\n )\n loss_interctc = loss_interctc + loss_ic\n\n # Collect Intermedaite CTC stats\n stats[\"loss_interctc_layer{}\".format(layer_idx)] = (\n loss_ic.detach() if loss_ic is not None else None\n )\n stats[\"cer_interctc_layer{}\".format(layer_idx)] = cer_ic\n\n loss_interctc = loss_interctc / len(intermediate_outs)\n\n # calculate whole encoder loss\n loss_ctc = (\n 1 - self.interctc_weight\n ) * loss_ctc + self.interctc_weight * loss_interctc\n\n if self.use_transducer_decoder:\n # 2a. 
Transducer decoder branch\n (\n loss_transducer,\n cer_transducer,\n wer_transducer,\n ) = self._calc_transducer_loss(\n encoder_out,\n encoder_out_lens,\n text,\n )\n\n if loss_ctc is not None:\n loss = loss_transducer + (self.ctc_weight * loss_ctc)\n else:\n loss = loss_transducer\n\n # Collect Transducer branch stats\n stats[\"loss_transducer\"] = (\n loss_transducer.detach() if loss_transducer is not None else None\n )\n stats[\"cer_transducer\"] = cer_transducer\n stats[\"wer_transducer\"] = wer_transducer\n\n else:\n # 2b. Attention decoder branch\n if self.ctc_weight != 1.0:\n loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(\n encoder_out, encoder_out_lens, text, text_lengths\n )\n\n # 3. CTC-Att loss definition\n if self.ctc_weight == 0.0:\n loss = loss_att\n elif self.ctc_weight == 1.0:\n loss = loss_ctc\n else:\n loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att\n\n # Collect Attn branch stats\n stats[\"loss_att\"] = loss_att.detach() if loss_att is not None else None\n stats[\"acc\"] = acc_att\n stats[\"cer\"] = cer_att\n stats[\"wer\"] = wer_att\n\n # Collect total loss stats\n stats[\"loss\"] = loss.detach()\n\n # force_gatherable: to-device and to-tensor if scalar for DataParallel\n loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)\n return loss, stats, weight\n\n def collect_feats(\n self,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n text: torch.Tensor,\n text_lengths: torch.Tensor,\n **kwargs,\n ) -> Dict[str, torch.Tensor]:\n if self.extract_feats_in_collect_stats:\n feats, feats_lengths = self._extract_feats(speech, speech_lengths)\n else:\n # Generate dummy stats if extract_feats_in_collect_stats is False\n logging.warning(\n \"Generating dummy stats for feats and feats_lengths, \"\n \"because encoder_conf.extract_feats_in_collect_stats is \"\n f\"{self.extract_feats_in_collect_stats}\"\n )\n feats, feats_lengths = speech, speech_lengths\n return {\"feats\": feats, \"feats_lengths\": feats_lengths}\n\n def encode(\n self, speech: torch.Tensor, speech_lengths: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Frontend + Encoder. Note that this method is used by asr_inference.py\n\n Args:\n speech: (Batch, Length, ...)\n speech_lengths: (Batch, )\n \"\"\"\n with autocast(False):\n # 1. Extract feats\n feats, feats_lengths = self._extract_feats(speech, speech_lengths)\n\n # 2. Data augmentation\n if self.specaug is not None and self.training:\n feats, feats_lengths = self.specaug(feats, feats_lengths)\n\n # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN\n if self.normalize is not None:\n feats, feats_lengths = self.normalize(feats, feats_lengths)\n\n # Pre-encoder, e.g. used for raw input data\n if self.preencoder is not None:\n feats, feats_lengths = self.preencoder(feats, feats_lengths)\n\n # 4. Forward encoder\n # feats: (Batch, Length, Dim)\n # -> encoder_out: (Batch, Length2, Dim2)\n if self.encoder.interctc_use_conditioning:\n encoder_out, encoder_out_lens, _ = self.encoder(\n feats, feats_lengths, ctc=self.ctc\n )\n else:\n encoder_out, encoder_out_lens, _ = self.encoder(feats, feats_lengths)\n intermediate_outs = None\n if isinstance(encoder_out, tuple):\n intermediate_outs = encoder_out[1]\n encoder_out = encoder_out[0]\n\n # Post-encoder, e.g. 
NLU\n if self.postencoder is not None:\n encoder_out, encoder_out_lens = self.postencoder(\n encoder_out, encoder_out_lens\n )\n\n assert encoder_out.size(0) == speech.size(0), (\n encoder_out.size(),\n speech.size(0),\n )\n assert encoder_out.size(1) <= encoder_out_lens.max(), (\n encoder_out.size(),\n encoder_out_lens.max(),\n )\n\n if intermediate_outs is not None:\n return (encoder_out, intermediate_outs), encoder_out_lens\n\n return encoder_out, encoder_out_lens\n\n def _extract_feats(\n self, speech: torch.Tensor, speech_lengths: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n assert speech_lengths.dim() == 1, speech_lengths.shape\n\n # for data-parallel\n speech = speech[:, : speech_lengths.max()]\n\n if self.frontend is not None:\n # Frontend\n # e.g. STFT and Feature extract\n # data_loader may send time-domain signal in this case\n # speech (Batch, NSamples) -> feats: (Batch, NFrames, Dim)\n feats, feats_lengths = self.frontend(speech, speech_lengths)\n else:\n # No frontend and no feature extract\n feats, feats_lengths = speech, speech_lengths\n return feats, feats_lengths\n\n def nll(\n self,\n encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_pad_lens: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"Compute negative log likelihood(nll) from transformer-decoder\n\n Normally, this function is called in batchify_nll.\n\n Args:\n encoder_out: (Batch, Length, Dim)\n encoder_out_lens: (Batch,)\n ys_pad: (Batch, Length)\n ys_pad_lens: (Batch,)\n \"\"\"\n ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)\n ys_in_lens = ys_pad_lens + 1\n\n # 1. Forward decoder\n decoder_out, _ = self.decoder(\n encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens\n ) # [batch, seqlen, dim]\n batch_size = decoder_out.size(0)\n decoder_num_class = decoder_out.size(2)\n # nll: negative log-likelihood\n nll = torch.nn.functional.cross_entropy(\n decoder_out.view(-1, decoder_num_class),\n ys_out_pad.view(-1),\n ignore_index=self.ignore_id,\n reduction=\"none\",\n )\n nll = nll.view(batch_size, -1)\n nll = nll.sum(dim=1)\n assert nll.size(0) == batch_size\n return nll\n\n def batchify_nll(\n self,\n encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_pad_lens: torch.Tensor,\n batch_size: int = 100,\n ):\n \"\"\"Compute negative log likelihood(nll) from transformer-decoder\n\n To avoid OOM, this fuction seperate the input into batches.\n Then call nll for each batch and combine and return results.\n Args:\n encoder_out: (Batch, Length, Dim)\n encoder_out_lens: (Batch,)\n ys_pad: (Batch, Length)\n ys_pad_lens: (Batch,)\n batch_size: int, samples each batch contain when computing nll,\n you may change this to avoid OOM or increase\n GPU memory usage\n \"\"\"\n total_num = encoder_out.size(0)\n if total_num <= batch_size:\n nll = self.nll(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)\n else:\n nll = []\n start_idx = 0\n while True:\n end_idx = min(start_idx + batch_size, total_num)\n batch_encoder_out = encoder_out[start_idx:end_idx, :, :]\n batch_encoder_out_lens = encoder_out_lens[start_idx:end_idx]\n batch_ys_pad = ys_pad[start_idx:end_idx, :]\n batch_ys_pad_lens = ys_pad_lens[start_idx:end_idx]\n batch_nll = self.nll(\n batch_encoder_out,\n batch_encoder_out_lens,\n batch_ys_pad,\n batch_ys_pad_lens,\n )\n nll.append(batch_nll)\n start_idx = end_idx\n if start_idx == total_num:\n break\n nll = torch.cat(nll)\n assert nll.size(0) == total_num\n return nll\n\n def _calc_att_loss(\n self,\n 
encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_pad_lens: torch.Tensor,\n ):\n ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)\n ys_in_lens = ys_pad_lens + 1\n\n # 1. Forward decoder\n decoder_out, _ = self.decoder(\n encoder_out, encoder_out_lens, ys_in_pad, ys_in_lens\n )\n\n # 2. Compute attention loss\n loss_att = self.criterion_att(decoder_out, ys_out_pad)\n acc_att = th_accuracy(\n decoder_out.view(-1, self.vocab_size),\n ys_out_pad,\n ignore_label=self.ignore_id,\n )\n\n # Compute cer/wer using attention-decoder\n if self.training or self.error_calculator is None:\n cer_att, wer_att = None, None\n else:\n ys_hat = decoder_out.argmax(dim=-1)\n cer_att, wer_att = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())\n\n return loss_att, acc_att, cer_att, wer_att\n\n def _calc_ctc_loss(\n self,\n encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n ys_pad: torch.Tensor,\n ys_pad_lens: torch.Tensor,\n ):\n # Calc CTC loss\n loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)\n\n # Calc CER using CTC\n cer_ctc = None\n if not self.training and self.error_calculator is not None:\n ys_hat = self.ctc.argmax(encoder_out).data\n cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)\n return loss_ctc, cer_ctc\n\n def _calc_transducer_loss(\n self,\n encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n labels: torch.Tensor,\n ):\n \"\"\"Compute Transducer loss.\n\n Args:\n encoder_out: Encoder output sequences. (B, T, D_enc)\n encoder_out_lens: Encoder output sequences lengths. (B,)\n labels: Label ID sequences. (B, L)\n\n Return:\n loss_transducer: Transducer loss value.\n cer_transducer: Character error rate for Transducer.\n wer_transducer: Word Error Rate for Transducer.\n\n \"\"\"\n decoder_in, target, t_len, u_len = get_transducer_task_io(\n labels,\n encoder_out_lens,\n ignore_id=self.ignore_id,\n blank_id=self.blank_id,\n )\n\n self.decoder.set_device(encoder_out.device)\n decoder_out = self.decoder(decoder_in)\n\n joint_out = self.joint_network(\n encoder_out.unsqueeze(2), decoder_out.unsqueeze(1)\n )\n\n loss_transducer = self.criterion_transducer(\n joint_out,\n target,\n t_len,\n u_len,\n )\n\n cer_transducer, wer_transducer = None, None\n if not self.training and self.error_calculator_trans is not None:\n cer_transducer, wer_transducer = self.error_calculator_trans(\n encoder_out, target\n )\n\n return loss_transducer, cer_transducer, wer_transducer\n" ]
[ [ "torch.cat", "torch.cuda.amp.autocast" ] ]
Mizeri/SolveSudoku
[ "e66be4a7cbebf835b672781bb6c9dd998d94c442" ]
[ "SolveSudoku.py" ]
[ "import numpy as np\n\nSudoku = np.array([0] * 9, [0] * 9, [0] * 9, [0] * 9, [0] * 9, [0] * 9,\n [0] * 9, [0] * 9, [0] * 9)\n\n\ndef findNextCell(sudoku):\n for i in range(0, 9):\n for j in range(0, 9):\n if sudoku[i][j] == 0:\n return i, j\n return -1, -1\n\n\ndef isValid(sudoku, x, y, e):\n row_isValid = all([e != sudoku[x][p] for p in range(0, 9)])\n col_isValid = all([e != sudoku[q][y] for q in range(0, 9)])\n sec_isValid = all([e != sudoku[q][p]\n for q in range(x // 3 * 3, x // 3 * 3 + 3)\n for p in range(y // 3 * 3, y // 3 * 3 + 3)])\n return (row_isValid and col_isValid and sec_isValid)\n\n\ndef solveSudoku(sudoku, i=0, j=0):\n\n global times\n\n i, j = findNextCell(sudoku)\n if i == -1 and j == -1:\n return True\n\n for e in range(1, 10):\n if isValid(sudoku, i, j, e):\n sudoku[i][j] = e\n if solveSudoku(sudoku, i, j):\n return True\n sudoku[i][j] = 0\n times += 1\n return False\n\n\nif __name__ == '__main__':\n solveSudoku()\n" ]
[ [ "numpy.array" ] ]
md-arif-shaikh/publication-plot-settings
[ "55d851646b492fc58c8011e1047bfcaccc947415" ]
[ "examples/example.py" ]
[ "import numpy as np\nimport plotsettings as plts\n\nx = np.arange(-np.pi, np.pi, 0.01)\nfig, ax = plts.set(\"APS\", left=0.17)\nfor idx in range(5):\n ax.plot(x, np.sin(x + idx), label=r\"$i = %.f$\" % idx)\nax.set_xlabel(r\"$\\theta$\")\nax.set_ylabel(r\"$\\sin(\\theta + i)$\")\nax.legend(loc=\"upper left\")\nax.grid()\nfig.savefig(\"../figs/APS.pdf\")\n\nfig, ax = plts.set(\"APS\", fig_type=\"twocol\", left=0.1)\nfor idx in range(5):\n ax.plot(x, np.sin(x + idx), label=r\"$i = %.f$\" % idx)\nax.set_xlabel(r\"$\\theta$\")\nax.set_ylabel(r\"$\\sin(\\theta + i)$\")\nax.legend(loc=\"upper left\")\nax.grid()\nfig.savefig(\"../figs/APS_twocol.pdf\")\n" ]
[ [ "numpy.arange", "numpy.sin" ] ]
scott-mao/DenseDepth_Pruning
[ "88bcff70e93dd68058a5cf0dfeac119a57abc6de" ]
[ "src/models/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Utils for handling models.\n\n- Author: Curt-Park\n- Email: [email protected]\n\"\"\"\n\nfrom collections import OrderedDict\nimport hashlib\nimport os\nimport re\nimport tarfile\nfrom typing import Any, Dict, List, Set, Tuple\n\nimport gdown\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.prune as prune\nimport wandb\nimport yaml\n\n\ndef get_model(model_name: str, model_config: Dict[str, Any]) -> nn.Module:\n \"\"\"Get PyTorch model.\"\"\"\n # get model constructor\n return __import__(\"src.models.\" + model_name, fromlist=[model_name]).get_model(\n #**model_config\n )\n\n\ndef initialize_params(model: Any, state_dict: Dict[str, Any], with_mask=True) -> None:\n \"\"\"Initialize weights and masks.\"\"\"\n model_dict = model.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = OrderedDict()\n for key_ori, key_pre in zip(model_dict.keys(), state_dict.keys()):\n if with_mask or (\"weight_mask\" not in key_ori and \"bias_mask\" not in key_ori):\n pretrained_dict[key_ori] = state_dict[key_pre]\n # 3. load the new state dict\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n\n\ndef get_model_hash(model: nn.Module) -> str:\n \"\"\"Get model info as hash.\"\"\"\n return hashlib.sha224(str(model).encode(\"UTF-8\")).hexdigest()\n\n\ndef get_pretrained_model_info(model: nn.Module) -> Dict[str, str]:\n \"\"\"Read yaml file, get pretrained model information(model_dir, gdrive_link) \\\n given hash.\"\"\"\n model_hash = str(get_model_hash(model))\n with open(\"config/pretrained_model_url.yaml\", mode=\"r\") as f:\n model_info = yaml.load(f, Loader=yaml.FullLoader)[model_hash]\n return model_info\n\n\ndef get_model_tensor_datatype(model: nn.Module) -> List[Tuple[str, torch.dtype]]:\n \"\"\"Print all tensors data types.\"\"\"\n return [\n (name, tensor.dtype)\n for name, tensor in model.state_dict().items()\n if hasattr(tensor, \"dtype\")\n ]\n\n\ndef get_params(\n model: nn.Module, extract_conditions: Tuple[Tuple[Any, str], ...]\n) -> Tuple[Tuple[nn.Module, str], ...]:\n \"\"\"Get parameters(weight and bias) tuples for pruning.\"\"\"\n t = []\n for module in model.modules():\n for module_type, param_name in extract_conditions:\n # it returns true when we try hasattr(even though it returns None)\n if (\n isinstance(module, module_type)\n and getattr(module, param_name) is not None\n ):\n t += [(module, param_name)]\n return tuple(t)\n\n\ndef get_layernames(model: nn.Module) -> Set[str]:\n \"\"\"Get parameters(weight and bias) layer name.\n\n Notes:\n No usage now, can be deprecated.\n \"\"\"\n\n t = set()\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue\n layer_name = name.rsplit(\".\", 1)[0]\n t.add(layer_name)\n return t\n\n\ndef get_model_size_mb(model: nn.Module) -> float:\n \"\"\"Get the model file size.\"\"\"\n torch.save(model.state_dict(), \"temp.p\")\n size = os.path.getsize(\"temp.p\") / 1e6\n os.remove(\"temp.p\")\n return size\n\n\ndef remove_pruning_reparameterization(\n params_to_prune: Tuple[Tuple[nn.Module, str], ...]\n) -> None:\n \"\"\"Combine (weight_orig, weight_mask) and reduce the model size.\"\"\"\n for module, weight_type in params_to_prune:\n prune.remove(module, weight_type)\n\n\ndef get_masks(model: nn.Module) -> Dict[str, torch.Tensor]:\n \"\"\"Get masks from the model.\"\"\"\n mask = dict()\n for k, v in model.state_dict().items():\n if \"mask\" in k:\n mask[k] = v.detach().cpu().clone()\n return mask\n\n\ndef dummy_pruning(params_all: 
Tuple[Tuple[nn.Module, str], ...]) -> None:\n \"\"\"Conduct fake pruning.\"\"\"\n prune.global_unstructured(\n params_all,\n pruning_method=prune.L1Unstructured,\n amount=0.0,\n )\n\n\ndef sparsity(\n params_all: Tuple[Tuple[nn.Module, str], ...],\n module_types: Tuple[Any, ...] = (\n nn.Conv2d,\n nn.Linear,\n nn.BatchNorm1d,\n nn.BatchNorm2d,\n ),\n) -> float:\n \"\"\"Get the proportion of zeros in weights (default: model's sparsity).\"\"\"\n n_zero = n_total = 0\n\n for module, param_name in params_all:\n match = next((m for m in module_types if type(module) is m), None)\n if not match:\n continue\n n_zero += int(torch.sum(getattr(module, param_name) == 0.0).item())\n n_total += getattr(module, param_name).nelement()\n\n return (100.0 * n_zero / n_total) if n_total != 0 else 0.0\n\n\ndef mask_sparsity(\n params_all: Tuple[Tuple[nn.Module, str], ...],\n module_types: Tuple[Any, ...] = (\n nn.Conv2d,\n nn.Linear,\n nn.BatchNorm1d,\n nn.BatchNorm2d,\n ),\n) -> float:\n \"\"\"Get the ratio of zeros in weight masks.\"\"\"\n n_zero = n_total = 0\n for module, param_name in params_all:\n match = next((m for m in module_types if type(module) is m), None)\n if not match:\n continue\n param_mask_name = param_name + \"_mask\"\n if hasattr(module, param_mask_name):\n param = getattr(module, param_mask_name)\n n_zero += int(torch.sum(param == 0.0).item())\n n_total += param.nelement()\n\n return (100.0 * n_zero / n_total) if n_total != 0 else 0.0\n\n\ndef download_pretrained_model(file_path: str, download_link: str) -> None:\n \"\"\"Get pretrained model from google drive.\"\"\"\n model_folder, model_name, file_name = file_path.rsplit(os.path.sep, 2)\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n # Download, unzip\n zip_file_path = os.path.join(model_folder, model_name + \".tar.xz\")\n gdown.download(download_link, zip_file_path)\n with tarfile.open(zip_file_path, \"r:*\") as f:\n f.extractall(model_folder)\n\n\ndef dot2bracket(s: str) -> str:\n \"\"\"Replace layer names with valid names for pruning.\n\n Test:\n >>> dot2bracket(\"dense2.1.bn1.bias\")\n 'dense2[1].bn1.bias'\n >>> dot2bracket(\"dense2.13.bn1.bias\")\n 'dense2[13].bn1.bias'\n >>> dot2bracket(\"conv2.123.bn1.bias\")\n 'conv2[123].bn1.bias'\n >>> dot2bracket(\"dense2.6.conv2.5.bn1.bias\")\n 'dense2[6].conv2[5].bn1.bias'\n >>> dot2bracket(\"model.6\")\n 'model[6]'\n >>> dot2bracket(\"vgg.2.conv2.bn.2\")\n 'vgg[2].conv2.bn[2]'\n >>> dot2bracket(\"features.11\")\n 'features[11]'\n >>> dot2bracket(\"dense_blocks.0.0.conv1\")\n 'dense_blocks[0][0].conv1'\n \"\"\"\n pattern = r\"\\.[0-9]+\"\n s_list = list(s)\n for m in re.finditer(pattern, s):\n start, end = m.span()\n # e.g s_list == [..., \".\", \"0\", \".\", \"0\", \".\", ...]\n # step1: [..., \"[\", \"0\", \"].\", \"0\", \".\", ...]\n # step2: [..., \"[\", \"0\", \"][\", \"0\", \"].\", ...]\n s_list[start] = s_list[start][:-1] + \"[\"\n if end < len(s) and s_list[end] == \".\":\n s_list[end] = \"].\"\n else:\n s_list.insert(end, \"]\")\n return \"\".join(s_list)\n\n\ndef wlog_weight(model: nn.Module) -> None:\n \"\"\"Log weights on wandb.\"\"\"\n wlog = dict()\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue\n layer_name, weight_type = name.rsplit(\".\", 1)\n\n # get params(weight, bias, weight_orig)\n if weight_type in (\"weight\", \"bias\", \"weight_orig\"):\n w_name = \"params/\" + layer_name + \".\" + weight_type\n weight = eval(\"model.\" + dot2bracket(layer_name) + \".\" + weight_type)\n weight = weight.cpu().data.numpy()\n 
wlog.update({w_name: wandb.Histogram(weight)})\n else:\n continue\n\n # get masked weights\n if weight_type == \"weight_orig\":\n w_name = \"params/\" + layer_name + \".weight\"\n named_buffers = eval(\n \"model.\" + dot2bracket(layer_name) + \".named_buffers()\"\n )\n mask: Tuple[str, torch.Tensor] = (\n next(x for x in list(named_buffers) if x[0] == \"weight_mask\")[1]\n .cpu()\n .data.numpy()\n )\n masked_weight = weight[np.where(mask == 1.0)]\n wlog.update({w_name: wandb.Histogram(masked_weight)})\n wandb.log(wlog, commit=False)\n\n\ndef split_channels(n_channels: int, n_chunks: int) -> List[int]:\n \"\"\"Get splitted channel numbers.\n\n It adds up all the remainders to the first chunck.\n \"\"\"\n split = [n_channels // n_chunks for _ in range(n_chunks)]\n split[0] += n_channels - sum(split)\n return split\n\n\ndef count_model_params(model: nn.Module) -> int:\n \"\"\"Count and return the total number of model params.\"\"\"\n return sum(p.numel() for p in model.parameters())\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n" ]
[ [ "torch.sum", "torch.nn.utils.prune.global_unstructured", "torch.nn.utils.prune.remove", "numpy.where" ] ]
ibaiGorordo/PyTorch-High-Res-Stereo-Depth-Estimation
[ "f8860766369efe1586344af7f0a70fc760bc82cc" ]
[ "highres_stereo/hsm/hsm.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom .submodule import disparityregression, decoderBlock\nfrom .unet import unet\nimport numpy as np\n\nclass HSMNet(nn.Module):\n def __init__(self, maxdisp,clean,level=1):\n super(HSMNet, self).__init__()\n self.maxdisp = maxdisp\n self.clean = clean\n self.feature_extraction = unet()\n self.level = level\n \n # block 4\n self.decoder6 = decoderBlock(6,32,32,up=True, pool=True)\n if self.level > 2:\n self.decoder5 = decoderBlock(6,32,32,up=False, pool=True)\n else:\n self.decoder5 = decoderBlock(6,32,32,up=True, pool=True)\n if self.level > 1:\n self.decoder4 = decoderBlock(6,32,32, up=False)\n else:\n self.decoder4 = decoderBlock(6,32,32, up=True)\n self.decoder3 = decoderBlock(5,32,32, stride=(2,1,1),up=False, nstride=1)\n # reg\n self.disp_reg8 = disparityregression(self.maxdisp,16)\n self.disp_reg16 = disparityregression(self.maxdisp,16)\n self.disp_reg32 = disparityregression(self.maxdisp,32)\n self.disp_reg64 = disparityregression(self.maxdisp,64)\n\n \n\n def feature_vol(self, refimg_fea, targetimg_fea,maxdisp, leftview=True):\n '''\n diff feature volume\n '''\n width = refimg_fea.shape[-1]\n cost = Variable(torch.cuda.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1], maxdisp, refimg_fea.size()[2], refimg_fea.size()[3]).fill_(0.))\n for i in range(min(maxdisp, width)):\n feata = refimg_fea[:,:,:,i:width]\n featb = targetimg_fea[:,:,:,:width-i]\n # concat\n if leftview:\n cost[:, :refimg_fea.size()[1], i, :,i:] = torch.abs(feata-featb)\n else:\n cost[:, :refimg_fea.size()[1], i, :,:width-i] = torch.abs(featb-feata)\n cost = cost.contiguous()\n return cost\n\n\n def forward(self, left, right):\n nsample = left.shape[0]\n conv4,conv3,conv2,conv1 = self.feature_extraction(torch.cat([left,right],0))\n conv40,conv30,conv20,conv10 = conv4[:nsample], conv3[:nsample], conv2[:nsample], conv1[:nsample]\n conv41,conv31,conv21,conv11 = conv4[nsample:], conv3[nsample:], conv2[nsample:], conv1[nsample:]\n\n feat6 = self.feature_vol(conv40, conv41, self.maxdisp//64)\n feat5 = self.feature_vol(conv30, conv31, self.maxdisp//32)\n feat4 = self.feature_vol(conv20, conv21, self.maxdisp//16)\n feat3 = self.feature_vol(conv10, conv11, self.maxdisp//8)\n\n feat6_2x, cost6 = self.decoder6(feat6)\n feat5 = torch.cat((feat6_2x, feat5),dim=1)\n\n feat5_2x, cost5 = self.decoder5(feat5)\n if self.level > 2:\n cost3 = F.interpolate(cost5, [left.size()[2],left.size()[3]], mode='bilinear')\n else:\n feat4 = torch.cat((feat5_2x, feat4),dim=1)\n\n feat4_2x, cost4 = self.decoder4(feat4) # 32\n if self.level > 1:\n cost3 = F.interpolate((cost4).unsqueeze(1), [self.disp_reg8.disp.shape[1], left.size()[2],left.size()[3]], mode='trilinear').squeeze(1)\n else:\n feat3 = torch.cat((feat4_2x, feat3),dim=1)\n\n feat3_2x, cost3 = self.decoder3(feat3) # 32\n cost3 = F.interpolate(cost3, [left.size()[2],left.size()[3]], mode='bilinear')\n if self.level > 2:\n final_reg = self.disp_reg32\n else:\n final_reg = self.disp_reg8\n\n if self.training or self.clean==-1:\n pred3 = final_reg(F.softmax(cost3,1)); entropy = pred3 # to save memory\n else:\n pred3,entropy = final_reg(F.softmax(cost3,1),ifent=True)\n pred3[entropy>self.clean] = np.inf\n\n if self.training:\n cost6 = F.interpolate((cost6).unsqueeze(1), [self.disp_reg8.disp.shape[1], left.size()[2],left.size()[3]], mode='trilinear').squeeze(1)\n cost5 = F.interpolate((cost5).unsqueeze(1), [self.disp_reg8.disp.shape[1], 
left.size()[2],left.size()[3]], mode='trilinear').squeeze(1)\n cost4 = F.interpolate(cost4, [left.size()[2],left.size()[3]], mode='bilinear')\n pred6 = self.disp_reg16(F.softmax(cost6,1))\n pred5 = self.disp_reg16(F.softmax(cost5,1))\n pred4 = self.disp_reg16(F.softmax(cost4,1))\n stacked = [pred3,pred4,pred5,pred6] \n return stacked,entropy\n else:\n return pred3,torch.squeeze(entropy)\n" ]
[ [ "torch.abs", "torch.nn.functional.softmax", "torch.squeeze", "torch.cat" ] ]
oztalha/News-Commentary-Tweets-of-Elites
[ "cd93caccd7a725e9c34f3bf7be6f0b9ecd88faf1" ]
[ "scrapers/scrape-nediyor.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 08 15:41:01 2015\n\n@author: Talha\n\"\"\"\n\nfrom selenium import webdriver\nimport pandas as pd\nimport time\n\n#initialize variables\ndf = pd.DataFrame(columns=('title', 'twcount', 'href'))\ndriver = webdriver.Firefox()\n\n# nediyor.com gundem sayfasi\n#driver.get('http://nediyor.com/haberler/gundem/') \ndriver.get('file:///Users/toz/Documents/workspace/TR-2014/nediyor.html')\n#ime.sleep(60) #sayfanin tamamen yuklenmesini beklememiz gerekiyor\n\n\"\"\"\n# Post islem icin benim indirmis oldugum nediyor.html'i kullanabilirsiniz\n# eger kendiniz indirmek isterseniz lazy loading problemini cozmek icin: \n# sayfanin sonuna git - her defasinda 18 yeni haber yukleniyor\nfor i in range(1000):\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(3)\n\"\"\"\nerrors = []\nnews = driver.find_elements_by_xpath(\"//div[@class='brick brick-big']\")\nfor i,haber in enumerate(news):\n try:\n title = haber.find_element_by_tag_name(\"h2\").find_element_by_tag_name(\"a\")\n twcount = haber.find_element_by_xpath(\"div[@class='tweet-count']\").find_element_by_tag_name(\"a\").text\n # print i, title.text , twcount, title.get_attribute(\"href\")\n df.loc[len(df)+1]=[title.text , twcount, title.get_attribute(\"href\")]\n except:\n errors.append(i)\n\ndf['twcount']=df['twcount'].astype(int)\ndf.to_csv(\"TR-news-headlines.csv\",encoding='utf-8',index=False)" ]
[ [ "pandas.DataFrame" ] ]
chkra/calc_batch_correct
[ "4dafcf77bc37539a4944b64658c85318c7a6b5b8" ]
[ "hello_genomics/main.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8\r\n'''\r\n Combat batch correction app for FASTGenomics\r\n'''\r\nimport json\r\nimport pathlib\r\nimport random\r\nimport csv\r\nimport jinja2\r\nimport logging\r\nimport enum\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\n\r\n\r\nfrom sklearn import decomposition\r\nfrom scipy.spatial.distance import pdist\r\n\r\nfrom collections import defaultdict\r\nfrom fastgenomics import io as fg_io\r\nfrom hello_genomics import logging_config\r\n\r\nimport combat as cb\r\n\r\n# initialize logging\r\nlogging_config.configure_logging(level=logging.INFO)\r\nlogger = logging.getLogger('hello_genomics_calc')\r\n\r\n# set paths to jinja2-templates for summary.md etc.\r\nTEMPLATE_PATH = pathlib.Path(__file__).parent.parent / 'templates'\r\n\r\n\r\n\r\n\r\nclass Columns(str, enum.Enum):\r\n # @todo: this is horrible on so many levels ...\r\n CELLS = 'cellId*Ganzzahl'\r\n GENES = 'entrezId*Ganzzahl'\r\n EXPR = 'expressionValue*Zahl'\r\n # BATCH = '_generated_batch*Text'\r\n BATCH = 'batch'\r\n\r\n\r\ndef get_data():\r\n logger.info('Loading genes and cell annotation matrice')\r\n # @todo: tidy up\r\n # genes_path = fg_io.get_input_path('genes_data_input')\r\n expre_path = fg_io.get_input_path('expression_input')\r\n cells_meta = fg_io.get_input_path('cells_meta_input')\r\n\r\n # combat requires full matrix input - unstack input file\r\n # combat expects matrix of shape [genes x cells], so index columns accordingly\r\n # @todo: check if this truly makes sense\r\n # @todo: the Columns.enum-Trick sucks, this should be some global definition\r\n # @todo: will blow up for large data files - \r\n X = pd.read_csv(expre_path , sep='\\t')\r\n print(X.head(10))\r\n data = X.set_index([Columns.GENES, Columns.CELLS])\\\r\n .unstack() \\\r\n .fillna(0)\r\n # @todo this sucks as well - won't hurt to select this column, but\r\n # @todo I'd rather have a global data scheme\r\n # .loc[:, Columns.EXPR]\r\n\r\n pheno = pd.read_csv(cells_meta, sep='\\t')\r\n return data, pheno\r\n\r\n\r\ndef get_test_data():\r\n # @todo: how to test?\r\n \r\n logger.info('Loading genes and cell annotation matrice')\r\n genes_path = fg_io.get_input_path('test_genes_data_input')\r\n cells_meta = fg_io.get_input_path('test_cells_meta_input')\r\n\r\n #genes_path = './bladder-expr.txt'\r\n #cells_meta = './bladder-pheno.txt'\r\n \r\n data = pd.read_csv(genes_path, sep='\\t')\r\n pheno = pd.read_csv(cells_meta, sep='\\t')\r\n\r\n return data, pheno\r\n\r\n\r\ndef check_batch_distribution(X, batch_anno, axis, title=''):\r\n \r\n pca = decomposition.PCA(n_components=2)\r\n pca.fit(X)\r\n X_trans = pca.transform(X)\r\n\r\n all_batch_reps = []\r\n\r\n labels = set(batch_anno)\r\n colors = cm.spectral(np.linspace(0, 1, len(labels)))\r\n\r\n for val, col in zip(labels, colors):\r\n Z = X_trans[np.ix_((batch_anno == val))]\r\n rep = np.mean(Z, axis=0)\r\n all_batch_reps.append(rep)\r\n\r\n axis.scatter(Z[:, 0], Z[:, 1], label=val, marker='o', c=col, edgecolor='none')\r\n axis.add_artist(plt.Circle(rep, 5, color=col))\r\n\r\n axis.set_title(title)\r\n axis.legend(numpoints=1)\r\n\r\n all_batch_reps = np.array(all_batch_reps)\r\n return np.sum(pdist(all_batch_reps))\r\n\r\n\r\ndef make_output(data, corr, pheno, parameters):\r\n \r\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 4))\r\n\r\n total_batch_dist = check_batch_distribution(data.values.T,\r\n pheno[Columns.BATCH],\r\n ax1,\r\n 
'Before Batch Correction')\r\n\r\n    total_batch_dist_corr = check_batch_distribution(corr.values.T,\r\n                                                      pheno[Columns.BATCH],\r\n                                                      ax2,\r\n                                                      'After Batch Correction')\r\n\r\n    logger.info('Batch center distance before correction: ' + str(total_batch_dist))\r\n    logger.info('Batch center distance after correction: ' + str(total_batch_dist_corr))\r\n    corr_ratio = total_batch_dist / total_batch_dist_corr\r\n\r\n    if corr_ratio >= 1:\r\n        logger.info('Batch completed without errors. Reduced batch center distance by ratio of '\r\n                    + str(np.round(corr_ratio, 2)))\r\n    else:\r\n        logger.error('Batch correction modified data in invalid way!')\r\n        logger.error('Batch center ratio is less than 1:' + str(np.round(corr_ratio, 2)))\r\n\r\n    doc_img_path = fg_io.get_output_path('batch_corr_img')\r\n    logger.info('Plotting PCA embedding of data for documentation.')\r\n    # plt.savefig(doc_img_path, bbox_inches='tight')\r\n\r\n    logger.info(\"Storing matrix of batch-corrected gene expressions.\")\r\n    output_path = fg_io.get_output_path('batch_corr_matrix')\r\n    corr.to_csv(output_path)\r\n\r\n    results = {'num_batches': len(set(pheno[Columns.BATCH])),\r\n               'ctr_dist_before': total_batch_dist,\r\n               'ctr_dist_after': total_batch_dist_corr,\r\n               'ctr_ratio': corr_ratio}\r\n\r\n    logger.debug(\"Loading Jinja2 summary template\")\r\n    with open(TEMPLATE_PATH / 'summary.md.j2') as temp:\r\n        template_str = temp.read()\r\n\r\n    logger.debug(\"Rendering template\")\r\n    template = jinja2.Template(template_str)\r\n    summary = template.render(results=results, parameters=parameters)\r\n\r\n    logger.info(\"Writing summary\")\r\n    summary_path = fg_io.get_summary_path()\r\n    with summary_path.open('w') as f_sum:\r\n        f_sum.write(summary)\r\n\r\n\r\ndef main():\r\n    '''\r\n    main routine of batch correction with combat\r\n    '''\r\n\r\n    try:\r\n\r\n        logger.info('Loading parameters')\r\n        parameters = fg_io.get_parameters()\r\n\r\n        random.seed(4711)\r\n        parameters['random_seed'] = 4711\r\n\r\n        data, pheno = get_data()\r\n        # data, pheno = get_test_data()\r\n\r\n        logger.info('Received data matrix of shape (genes x cells) = ' + str(data.shape))\r\n        logger.info('Found the following batches: ' + str(set(pheno[Columns.BATCH])))\r\n        logger.info('Calling combat for batch correction.')\r\n\r\n        corr = cb.combat(data, pheno[Columns.BATCH])\r\n\r\n        make_output(data, corr, pheno, parameters)\r\n\r\n        logger.info('Done.')\r\n\r\n    except Exception as inst:\r\n        logger.error(type(inst))\r\n        logger.error(inst)\r\n        raise inst\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n" ]
[ [ "numpy.ix_", "pandas.read_csv", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.Circle", "numpy.round", "scipy.spatial.distance.pdist", "numpy.mean", "numpy.array", "sklearn.decomposition.PCA" ] ]
nantutech/ntcore
[ "2daacad2435c30f116b76685aa579b4665bff9f7" ]
[ "client/examples/tf_keras_example.py" ]
[ "import tensorflow as tf\nprint(\"TensorFlow version:\", tf.__version__)\n\nfrom ntcore import client\nclient.set_endpoint('http://localhost:8000')\nclient.autolog('C8W60XEPH7DA3AAH3S41PJZ3OV')\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10)\n])\n\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\nmodel.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])\n\nwith client.start_run():\n model.fit(x_train, y_train, epochs=2)\n model.evaluate(x_test, y_test, verbose=2)\n model.save('./model_serving')\n" ]
[ [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dropout" ] ]
mattmcdermott/custodian
[ "ae3d3f903337ff5753db9288bd6f7c8f360f2f25" ]
[ "custodian/vasp/jobs.py" ]
[ "# coding: utf-8\n\nfrom __future__ import unicode_literals, division\nimport subprocess\nimport os\nimport shutil\nimport math\nimport logging\n\nimport numpy as np\n\nfrom pymatgen import Structure\nfrom pymatgen.io.vasp import VaspInput, Incar, Poscar, Outcar, Kpoints, Vasprun\nfrom monty.os.path import which\nfrom monty.shutil import decompress_dir\nfrom monty.serialization import dumpfn, loadfn\n\nfrom custodian.custodian import Job, SENTRY_DSN\nfrom custodian.utils import backup\nfrom custodian.vasp.interpreter import VaspModder\nfrom custodian.vasp.handlers import VASP_BACKUP_FILES\n\n\"\"\"\nThis module implements basic kinds of jobs for VASP runs.\n\"\"\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n__author__ = \"Shyue Ping Ong\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__status__ = \"Beta\"\n__date__ = \"2/4/13\"\n\n\nVASP_INPUT_FILES = {\"INCAR\", \"POSCAR\", \"POTCAR\", \"KPOINTS\"}\n\nVASP_OUTPUT_FILES = ['DOSCAR', 'INCAR', 'KPOINTS', 'POSCAR', 'PROCAR',\n 'vasprun.xml', 'CHGCAR', 'CHG', 'EIGENVAL', 'OSZICAR',\n 'WAVECAR', 'CONTCAR', 'IBZKPT', 'OUTCAR']\n\nVASP_NEB_INPUT_FILES = {'INCAR', 'POTCAR', 'KPOINTS'}\n\nVASP_NEB_OUTPUT_FILES = ['INCAR', 'KPOINTS', 'POTCAR', 'vasprun.xml']\n\nVASP_NEB_OUTPUT_SUB_FILES = ['CHG', 'CHGCAR', 'CONTCAR', 'DOSCAR',\n 'EIGENVAL', 'IBZKPT', 'PCDAT', 'POSCAR',\n 'REPORT', 'PROCAR', 'OSZICAR', 'OUTCAR',\n 'WAVECAR', 'XDATCAR']\n\n\nclass VaspJob(Job):\n \"\"\"\n A basic vasp job. Just runs whatever is in the directory. But conceivably\n can be a complex processing of inputs etc. with initialization.\n \"\"\"\n\n def __init__(self, vasp_cmd, output_file=\"vasp.out\",\n stderr_file=\"std_err.txt\", suffix=\"\", final=True,\n backup=True, auto_npar=False, auto_gamma=True,\n settings_override=None, gamma_vasp_cmd=None,\n copy_magmom=False, auto_continue=False):\n \"\"\"\n This constructor is necessarily complex due to the need for\n flexibility. For standard kinds of runs, it's often better to use one\n of the static constructors. The defaults are usually fine too.\n\n Args:\n vasp_cmd (str): Command to run vasp as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n output_file (str): Name of file to direct standard out to.\n Defaults to \"vasp.out\".\n stderr_file (str): Name of file to direct standard error to.\n Defaults to \"std_err.txt\".\n suffix (str): A suffix to be appended to the final output. E.g.,\n to rename all VASP output from say vasp.out to\n vasp.out.relax1, provide \".relax1\" as the suffix.\n final (bool): Indicating whether this is the final vasp job in a\n series. Defaults to True.\n backup (bool): Whether to backup the initial input files. If True,\n the INCAR, KPOINTS, POSCAR and POTCAR will be copied with a\n \".orig\" appended. Defaults to True.\n auto_npar (bool): Whether to automatically tune NPAR to be sqrt(\n number of cores) as recommended by VASP for DFT calculations.\n Generally, this results in significant speedups. Defaults to\n True. Set to False for HF, GW and RPA calculations.\n auto_gamma (bool): Whether to automatically check if run is a\n Gamma 1x1x1 run, and whether a Gamma optimized version of\n VASP exists with \".gamma\" appended to the name of the VASP\n executable (typical setup in many systems). If so, run the\n gamma optimized version of VASP instead of regular VASP. 
You\n can also specify the gamma vasp command using the\n gamma_vasp_cmd argument if the command is named differently.\n settings_override ([dict]): An ansible style list of dict to\n override changes. For example, to set ISTART=1 for subsequent\n runs and to copy the CONTCAR to the POSCAR, you will provide::\n\n [{\"dict\": \"INCAR\", \"action\": {\"_set\": {\"ISTART\": 1}}},\n {\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n gamma_vasp_cmd (str): Command for gamma vasp version when\n auto_gamma is True. Should follow the list style of\n subprocess. Defaults to None, which means \".gamma\" is added\n to the last argument of the standard vasp_cmd.\n copy_magmom (bool): Whether to copy the final magmom from the\n OUTCAR to the next INCAR. Useful for multi-relaxation runs\n where the CHGCAR and WAVECAR are sometimes deleted (due to\n changes in fft grid, etc.). Only applies to non-final runs.\n auto_continue (bool): Whether to automatically continue a run\n if a STOPCAR is present. This is very useful if using the\n wall-time handler which will write a read-only STOPCAR to\n prevent VASP from deleting it once it finishes\n \"\"\"\n self.vasp_cmd = vasp_cmd\n self.output_file = output_file\n self.stderr_file = stderr_file\n self.final = final\n self.backup = backup\n self.suffix = suffix\n self.settings_override = settings_override\n self.auto_npar = auto_npar\n self.auto_gamma = auto_gamma\n self.gamma_vasp_cmd = gamma_vasp_cmd\n self.copy_magmom = copy_magmom\n self.auto_continue = auto_continue\n \n if SENTRY_DSN:\n # if using Sentry logging, add specific VASP executable to scope\n from sentry_sdk import configure_scope\n with configure_scope() as scope:\n scope.set_tag(\"vasp_cmd\", which(vasp_cmd))\n\n def setup(self):\n \"\"\"\n Performs initial setup for VaspJob, including overriding any settings\n and backing up.\n \"\"\"\n decompress_dir('.')\n\n if self.backup:\n for f in VASP_INPUT_FILES:\n shutil.copy(f, \"{}.orig\".format(f))\n\n if self.auto_npar:\n try:\n incar = Incar.from_file(\"INCAR\")\n # Only optimized NPAR for non-HF and non-RPA calculations.\n if not (incar.get(\"LHFCALC\") or incar.get(\"LRPA\") or\n incar.get(\"LEPSILON\")):\n if incar.get(\"IBRION\") in [5, 6, 7, 8]:\n # NPAR should not be set for Hessian matrix\n # calculations, whether in DFPT or otherwise.\n del incar[\"NPAR\"]\n else:\n import multiprocessing\n # try sge environment variable first\n # (since multiprocessing counts cores on the current\n # machine only)\n ncores = os.environ.get('NSLOTS') or \\\n multiprocessing.cpu_count()\n ncores = int(ncores)\n for npar in range(int(math.sqrt(ncores)),\n ncores):\n if ncores % npar == 0:\n incar[\"NPAR\"] = npar\n break\n incar.write_file(\"INCAR\")\n except:\n pass\n\n if self.auto_continue:\n if os.path.exists(\"continue.json\"):\n actions = loadfn(\"continue.json\").get(\"actions\")\n logger.info(\"Continuing previous VaspJob. 
Actions: {}\".format(actions))\n backup(VASP_BACKUP_FILES, prefix=\"prev_run\")\n VaspModder().apply_actions(actions)\n\n else:\n # Default functionality is to copy CONTCAR to POSCAR and set\n # ISTART to 1 in the INCAR, but other actions can be specified\n if self.auto_continue is True:\n actions = [{\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}},\n {\"dict\": \"INCAR\",\n \"action\": {\"_set\": {\"ISTART\": 1}}}]\n else:\n actions = self.auto_continue\n dumpfn({\"actions\": actions}, \"continue.json\")\n\n if self.settings_override is not None:\n VaspModder().apply_actions(self.settings_override)\n\n def run(self):\n \"\"\"\n Perform the actual VASP run.\n\n Returns:\n (subprocess.Popen) Used for monitoring.\n \"\"\"\n cmd = list(self.vasp_cmd)\n if self.auto_gamma:\n vi = VaspInput.from_directory(\".\")\n kpts = vi[\"KPOINTS\"]\n if kpts.style == Kpoints.supported_modes.Gamma \\\n and tuple(kpts.kpts[0]) == (1, 1, 1):\n if self.gamma_vasp_cmd is not None and which(\n self.gamma_vasp_cmd[-1]):\n cmd = self.gamma_vasp_cmd\n elif which(cmd[-1] + \".gamma\"):\n cmd[-1] += \".gamma\"\n logger.info(\"Running {}\".format(\" \".join(cmd)))\n with open(self.output_file, 'w') as f_std, \\\n open(self.stderr_file, \"w\", buffering=1) as f_err:\n # use line buffering for stderr\n p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)\n return p\n\n def postprocess(self):\n \"\"\"\n Postprocessing includes renaming and gzipping where necessary.\n Also copies the magmom to the incar if necessary\n \"\"\"\n for f in VASP_OUTPUT_FILES + [self.output_file]:\n if os.path.exists(f):\n if self.final and self.suffix != \"\":\n shutil.move(f, \"{}{}\".format(f, self.suffix))\n elif self.suffix != \"\":\n shutil.copy(f, \"{}{}\".format(f, self.suffix))\n\n if self.copy_magmom and not self.final:\n try:\n outcar = Outcar(\"OUTCAR\")\n magmom = [m['tot'] for m in outcar.magnetization]\n incar = Incar.from_file(\"INCAR\")\n incar['MAGMOM'] = magmom\n incar.write_file(\"INCAR\")\n except:\n logger.error('MAGMOM copy from OUTCAR to INCAR failed')\n\n # Remove continuation so if a subsequent job is run in\n # the same directory, will not restart this job.\n if os.path.exists(\"continue.json\"):\n os.remove(\"continue.json\")\n\n @classmethod\n def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,\n half_kpts_first_relax=False, auto_continue=False):\n \"\"\"\n Returns a list of two jobs corresponding to an AFLOW style double\n relaxation run.\n\n Args:\n vasp_cmd (str): Command to run vasp as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n auto_npar (bool): Whether to automatically tune NPAR to be sqrt(\n number of cores) as recommended by VASP for DFT calculations.\n Generally, this results in significant speedups. Defaults to\n True. Set to False for HF, GW and RPA calculations.\n ediffg (float): Force convergence criteria for subsequent runs (\n ignored for the initial run.)\n half_kpts_first_relax (bool): Whether to halve the kpoint grid\n for the first relaxation. Speeds up difficult convergence\n considerably. 
Defaults to False.\n\n Returns:\n List of two jobs corresponding to an AFLOW style run.\n \"\"\"\n incar_update = {\"ISTART\": 1}\n if ediffg:\n incar_update[\"EDIFFG\"] = ediffg\n settings_overide_1 = None\n settings_overide_2 = [\n {\"dict\": \"INCAR\",\n \"action\": {\"_set\": incar_update}},\n {\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n if half_kpts_first_relax and os.path.exists(\"KPOINTS\") and \\\n os.path.exists(\"POSCAR\"):\n kpts = Kpoints.from_file(\"KPOINTS\")\n orig_kpts_dict = kpts.as_dict()\n # lattice vectors with length < 8 will get >1 KPOINT\n kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2,\n 1)).astype(int).tolist()\n low_kpts_dict = kpts.as_dict()\n settings_overide_1 = [\n {\"dict\": \"KPOINTS\",\n \"action\": {\"_set\": low_kpts_dict}}\n ]\n settings_overide_2.append(\n {\"dict\": \"KPOINTS\",\n \"action\": {\"_set\": orig_kpts_dict}}\n )\n\n return [VaspJob(vasp_cmd, final=False, suffix=\".relax1\",\n auto_npar=auto_npar, auto_continue=auto_continue,\n settings_override=settings_overide_1),\n VaspJob(vasp_cmd, final=True, backup=False, suffix=\".relax2\",\n auto_npar=auto_npar, auto_continue=auto_continue,\n settings_override=settings_overide_2)]\n\n @classmethod\n def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,\n half_kpts_first_relax=False, auto_continue=False):\n \"\"\"\n Returns a list of thres jobs to perform an optimization for any\n metaGGA functional. There is an initial calculation of the\n GGA wavefunction which is fed into the initial metaGGA optimization\n to precondition the electronic structure optimizer. The metaGGA\n optimization is performed using the double relaxation scheme\n \"\"\"\n\n incar = Incar.from_file(\"INCAR\")\n # Defaults to using the SCAN metaGGA\n metaGGA = incar.get(\"METAGGA\", \"SCAN\")\n\n # Pre optimze WAVECAR and structure using regular GGA\n pre_opt_setings = [{\"dict\": \"INCAR\",\n \"action\": {\"_set\": {\"METAGGA\": None,\n \"LWAVE\": True,\n \"NSW\": 0}}}]\n jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,\n final=False, suffix=\".precondition\",\n settings_override=pre_opt_setings)]\n\n # Finish with regular double relaxation style run using SCAN\n jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,\n ediffg=ediffg,\n half_kpts_first_relax=half_kpts_first_relax))\n\n # Ensure the first relaxation doesn't overwrite the original inputs\n jobs[1].backup = False\n\n # Update double_relaxation job to start from pre-optimized run\n post_opt_settings = [{\"dict\": \"INCAR\",\n \"action\": {\"_set\": {\"METAGGA\": metaGGA, \"ISTART\": 1,\n \"NSW\": incar.get(\"NSW\", 99),\n \"LWAVE\": incar.get(\"LWAVE\", False)}}},\n {\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n if jobs[1].settings_override:\n post_opt_settings = jobs[1].settings_override + post_opt_settings\n jobs[1].settings_override = post_opt_settings\n\n return jobs\n\n @classmethod\n def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02,\n max_steps=10, ediffg=-0.05, half_kpts_first_relax=False,\n **vasp_job_kwargs):\n \"\"\"\n Returns a generator of jobs for a full optimization run. Basically,\n this runs an infinite series of geometry optimization jobs until the\n % vol change in a particular optimization is less than vol_change_tol.\n\n Args:\n vasp_cmd (str): Command to run vasp as a list of args. 
For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n vol_change_tol (float): The tolerance at which to stop a run.\n Defaults to 0.05, i.e., 5%.\n max_steps (int): The maximum number of runs. Defaults to 10 (\n highly unlikely that this limit is ever reached).\n ediffg (float): Force convergence criteria for subsequent runs (\n ignored for the initial run.)\n half_kpts_first_relax (bool): Whether to halve the kpoint grid\n for the first relaxation. Speeds up difficult convergence\n considerably. Defaults to False.\n \\*\\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See\n :class:`custodian.vasp.jobs.VaspJob`.\n\n Returns:\n Generator of jobs.\n \"\"\"\n for i in range(max_steps):\n if i == 0:\n settings = None\n backup = True\n if half_kpts_first_relax and os.path.exists(\"KPOINTS\") and \\\n os.path.exists(\"POSCAR\"):\n kpts = Kpoints.from_file(\"KPOINTS\")\n orig_kpts_dict = kpts.as_dict()\n kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist()\n low_kpts_dict = kpts.as_dict()\n settings = [\n {\"dict\": \"KPOINTS\",\n \"action\": {\"_set\": low_kpts_dict}}\n ]\n else:\n backup = False\n initial = Poscar.from_file(\"POSCAR\").structure\n final = Poscar.from_file(\"CONTCAR\").structure\n vol_change = (final.volume - initial.volume) / initial.volume\n\n logger.info(\"Vol change = %.1f %%!\" % (vol_change * 100))\n if abs(vol_change) < vol_change_tol:\n logger.info(\"Stopping optimization!\")\n break\n else:\n incar_update = {\"ISTART\": 1}\n if ediffg:\n incar_update[\"EDIFFG\"] = ediffg\n settings = [\n {\"dict\": \"INCAR\",\n \"action\": {\"_set\": incar_update}},\n {\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n if i == 1 and half_kpts_first_relax:\n settings.append({\"dict\": \"KPOINTS\",\n \"action\": {\"_set\": orig_kpts_dict}})\n logger.info(\"Generating job = %d!\" % (i+1))\n yield VaspJob(vasp_cmd, final=False, backup=backup,\n suffix=\".relax%d\" % (i+1), settings_override=settings,\n **vasp_job_kwargs)\n\n @classmethod\n def constrained_opt_run(cls, vasp_cmd, lattice_direction, initial_strain,\n atom_relax=True, max_steps=20, algo=\"bfgs\",\n **vasp_job_kwargs):\n \"\"\"\n Returns a generator of jobs for a constrained optimization run. Typical\n use case is when you want to approximate a biaxial strain situation,\n e.g., you apply a defined strain to a and b directions of the lattice,\n but allows the c-direction to relax.\n\n Some guidelines on the use of this method:\n i. It is recommended you do not use the Auto kpoint generation. The\n grid generated via Auto may fluctuate with changes in lattice\n param, resulting in numerical noise.\n ii. Make sure your EDIFF/EDIFFG is properly set in your INCAR. The\n optimization relies on these values to determine convergence.\n\n Args:\n vasp_cmd (str): Command to run vasp as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n lattice_direction (str): Which direction to relax. Valid values are\n \"a\", \"b\" or \"c\".\n initial_strain (float): An initial strain to be applied to the\n lattice_direction. This can usually be estimated as the\n negative of the strain applied in the other two directions.\n E.g., if you apply a tensile strain of 0.05 to the a and b\n directions, you can use -0.05 as a reasonable first guess for\n initial strain.\n atom_relax (bool): Whether to relax atomic positions.\n max_steps (int): The maximum number of runs. 
Defaults to 20 (\n highly unlikely that this limit is ever reached).\n algo (str): Algorithm to use to find minimum. Default is \"bfgs\",\n which is fast, but can be sensitive to numerical noise\n in energy calculations. The alternative is \"bisection\",\n which is more robust but can be a bit slow. The code does fall\n back on the bisection when bfgs gives a non-sensical result,\n e.g., negative lattice params.\n \\*\\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See\n :class:`custodian.vasp.jobs.VaspJob`.\n\n Returns:\n Generator of jobs. At the end of the run, an \"EOS.txt\" is written\n which provides a quick look at the E vs lattice parameter.\n \"\"\"\n nsw = 99 if atom_relax else 0\n\n incar = Incar.from_file(\"INCAR\")\n\n # Set the energy convergence criteria as the EDIFFG (if present) or\n # 10 x EDIFF (which itself defaults to 1e-4 if not present).\n if incar.get(\"EDIFFG\") and incar.get(\"EDIFFG\") > 0:\n etol = incar[\"EDIFFG\"]\n else:\n etol = incar.get(\"EDIFF\", 1e-4) * 10\n\n if lattice_direction == \"a\":\n lattice_index = 0\n elif lattice_direction == \"b\":\n lattice_index = 1\n else:\n lattice_index = 2\n\n energies = {}\n\n for i in range(max_steps):\n if i == 0:\n settings = [\n {\"dict\": \"INCAR\",\n \"action\": {\"_set\": {\"ISIF\": 2, \"NSW\": nsw}}}]\n structure = Poscar.from_file(\"POSCAR\").structure\n x = structure.lattice.abc[lattice_index]\n backup = True\n else:\n backup = False\n v = Vasprun(\"vasprun.xml\")\n structure = v.final_structure\n energy = v.final_energy\n lattice = structure.lattice\n\n x = lattice.abc[lattice_index]\n\n energies[x] = energy\n\n if i == 1:\n x *= (1 + initial_strain)\n else:\n # Sort the lattice parameter by energies.\n min_x = min(energies.keys(), key=lambda e: energies[e])\n sorted_x = sorted(energies.keys())\n ind = sorted_x.index(min_x)\n if ind == 0:\n other = ind + 1\n elif ind == len(sorted_x) - 1:\n other = ind - 1\n else:\n other = ind + 1 \\\n if energies[sorted_x[ind + 1]] \\\n < energies[sorted_x[ind - 1]] \\\n else ind - 1\n if abs(energies[min_x]\n - energies[sorted_x[other]]) < etol:\n logger.info(\"Stopping optimization! Final %s = %f\"\n % (lattice_direction, min_x))\n break\n\n if ind == 0 and len(sorted_x) > 2:\n # Lowest energy lies outside of range of lowest value.\n # we decrease the lattice parameter in the next\n # iteration to find a minimum. This applies only when\n # there are at least 3 values.\n x = sorted_x[0] - abs(sorted_x[1] - sorted_x[0])\n logger.info(\"Lowest energy lies below bounds. \"\n \"Setting %s = %f.\" % (lattice_direction, x))\n elif ind == len(sorted_x) - 1 and len(sorted_x) > 2:\n # Lowest energy lies outside of range of highest value.\n # we increase the lattice parameter in the next\n # iteration to find a minimum. This applies only when\n # there are at least 3 values.\n x = sorted_x[-1] + abs(sorted_x[-1] - sorted_x[-2])\n logger.info(\"Lowest energy lies above bounds. 
\"\n \"Setting %s = %f.\" % (lattice_direction, x))\n else:\n if algo.lower() == \"bfgs\" and len(sorted_x) >= 4:\n try:\n # If there are more than 4 data points, we will\n # do a quadratic fit to accelerate convergence.\n x1 = list(energies.keys())\n y1 = [energies[j] for j in x1]\n z1 = np.polyfit(x1, y1, 2)\n pp = np.poly1d(z1)\n from scipy.optimize import minimize\n result = minimize(\n pp, min_x,\n bounds=[(sorted_x[0], sorted_x[-1])])\n if (not result.success) or result.x[0] < 0:\n raise ValueError(\n \"Negative lattice constant!\")\n x = result.x[0]\n logger.info(\"BFGS minimized %s = %f.\"\n % (lattice_direction, x))\n except ValueError as ex:\n # Fall back on bisection algo if the bfgs fails.\n logger.info(str(ex))\n x = (min_x + sorted_x[other]) / 2\n logger.info(\"Falling back on bisection %s = %f.\"\n % (lattice_direction, x))\n else:\n x = (min_x + sorted_x[other]) / 2\n logger.info(\"Bisection %s = %f.\"\n % (lattice_direction, x))\n\n lattice = lattice.matrix\n lattice[lattice_index] = lattice[lattice_index] / \\\n np.linalg.norm(lattice[lattice_index]) * x\n\n s = Structure(lattice, structure.species, structure.frac_coords)\n fname = \"POSCAR.%f\" % x\n s.to(filename=fname)\n\n incar_update = {\"ISTART\": 1, \"NSW\": nsw, \"ISIF\": 2}\n\n settings = [\n {\"dict\": \"INCAR\",\n \"action\": {\"_set\": incar_update}},\n {\"file\": fname,\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n\n logger.info(\"Generating job = %d with parameter %f!\" % (i + 1, x))\n yield VaspJob(vasp_cmd, final=False, backup=backup,\n suffix=\".static.%f\" % x,\n settings_override=settings, **vasp_job_kwargs)\n\n with open(\"EOS.txt\", \"wt\") as f:\n f.write(\"# %s energy\\n\" % lattice_direction)\n for k in sorted(energies.keys()):\n f.write(\"%f %f\\n\" % (k, energies[k]))\n\n def terminate(self):\n for k in self.vasp_cmd:\n if \"vasp\" in k:\n try:\n os.system(\"killall %s\" % k)\n except:\n pass\n\n\nclass VaspNEBJob(Job):\n \"\"\"\n A NEB vasp job, especially for CI-NEB running at PBS clusters.\n The class is added for the purpose of handling a different folder\n arrangement in NEB calculation.\n \"\"\"\n\n def __init__(self, vasp_cmd,\n output_file=\"neb_vasp.out\", stderr_file=\"neb_std_err.txt\",\n suffix=\"\", final=True, backup=True, auto_npar=True,\n half_kpts=False, auto_gamma=True, auto_continue=False,\n gamma_vasp_cmd=None, settings_override=None):\n \"\"\"\n This constructor is a simplified version of VaspJob, which satisfies\n the need for flexibility. For standard kinds of runs, it's often\n better to use one of the static constructors. The defaults are\n usually fine too.\n\n Args:\n vasp_cmd (str): Command to run vasp as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n output_file (str): Name of file to direct standard out to.\n Defaults to \"vasp.out\".\n stderr_file (str): Name of file to direct standard error to.\n Defaults to \"std_err.txt\".\n suffix (str): A suffix to be appended to the final output. E.g.,\n to rename all VASP output from say vasp.out to\n vasp.out.relax1, provide \".relax1\" as the suffix.\n final (bool): Indicating whether this is the final vasp job in a\n series. Defaults to True.\n backup (bool): Whether to backup the initial input files. If True,\n the INCAR, KPOINTS, POSCAR and POTCAR will be copied with a\n \".orig\" appended. 
Defaults to True.\n auto_npar (bool): Whether to automatically tune NPAR to be sqrt(\n number of cores) as recommended by VASP for DFT calculations.\n Generally, this results in significant speedups. Defaults to\n True. Set to False for HF, GW and RPA calculations.\n half_kpts (bool): Whether to halve the kpoint grid for NEB.\n Speeds up convergence considerably. Defaults to False.\n auto_gamma (bool): Whether to automatically check if run is a\n Gamma 1x1x1 run, and whether a Gamma optimized version of\n VASP exists with \".gamma\" appended to the name of the VASP\n executable (typical setup in many systems). If so, run the\n gamma optimized version of VASP instead of regular VASP. You\n can also specify the gamma vasp command using the\n gamma_vasp_cmd argument if the command is named differently.\n auto_continue (bool): Whether to automatically continue a run\n if a STOPCAR is present. This is very useful if using the\n wall-time handler which will write a read-only STOPCAR to\n prevent VASP from deleting it once it finishes.\n gamma_vasp_cmd (str): Command for gamma vasp version when\n auto_gamma is True. Should follow the list style of\n subprocess. Defaults to None, which means \".gamma\" is added\n to the last argument of the standard vasp_cmd.\n settings_override ([dict]): An ansible style list of dict to\n override changes. For example, to set ISTART=1 for subsequent\n runs and to copy the CONTCAR to the POSCAR, you will provide::\n\n [{\"dict\": \"INCAR\", \"action\": {\"_set\": {\"ISTART\": 1}}},\n {\"file\": \"CONTCAR\",\n \"action\": {\"_file_copy\": {\"dest\": \"POSCAR\"}}}]\n \"\"\"\n\n self.vasp_cmd = vasp_cmd\n self.output_file = output_file\n self.stderr_file = stderr_file\n self.final = final\n self.backup = backup\n self.suffix = suffix\n self.auto_npar = auto_npar\n self.half_kpts = half_kpts\n self.auto_gamma = auto_gamma\n self.gamma_vasp_cmd = gamma_vasp_cmd\n self.auto_continue = auto_continue\n self.settings_override = settings_override\n self.neb_dirs = [] # 00, 01, etc.\n self.neb_sub = [] # 01, 02, etc.\n\n for path in os.listdir(\".\"):\n if os.path.isdir(path) and path.isdigit():\n self.neb_dirs.append(path)\n self.neb_dirs = sorted(self.neb_dirs)\n self.neb_sub = self.neb_dirs[1: -1]\n\n def setup(self):\n \"\"\"\n Performs initial setup for VaspNEBJob, including overriding any settings\n and backing up.\n \"\"\"\n neb_dirs = self.neb_dirs\n\n if self.backup:\n # Back up KPOINTS, INCAR, POTCAR\n for f in VASP_NEB_INPUT_FILES:\n shutil.copy(f, \"{}.orig\".format(f))\n # Back up POSCARs\n for path in neb_dirs:\n poscar = os.path.join(path, \"POSCAR\")\n shutil.copy(poscar, \"{}.orig\".format(poscar))\n\n if self.half_kpts and os.path.exists(\"KPOINTS\"):\n kpts = Kpoints.from_file(\"KPOINTS\")\n kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1)\n kpts.kpts = kpts.kpts.astype(int).tolist()\n if tuple(kpts.kpts[0]) == (1, 1, 1):\n kpt_dic = kpts.as_dict()\n kpt_dic[\"generation_style\"] = 'Gamma'\n kpts = Kpoints.from_dict(kpt_dic)\n kpts.write_file(\"KPOINTS\")\n\n if self.auto_npar:\n try:\n incar = Incar.from_file(\"INCAR\")\n import multiprocessing\n # Try sge environment variable first\n # (since multiprocessing counts cores on the current\n # machine only)\n ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count()\n ncores = int(ncores)\n for npar in range(int(math.sqrt(ncores)),\n ncores):\n if ncores % npar == 0:\n incar[\"NPAR\"] = npar\n break\n incar.write_file(\"INCAR\")\n except:\n pass\n\n if self.auto_continue and \\\n 
os.path.exists(\"STOPCAR\") and \\\n not os.access(\"STOPCAR\", os.W_OK):\n # Remove STOPCAR\n os.chmod(\"STOPCAR\", 0o644)\n os.remove(\"STOPCAR\")\n\n # Copy CONTCAR to POSCAR\n for path in self.neb_sub:\n contcar = os.path.join(path, \"CONTCAR\")\n poscar = os.path.join(path, \"POSCAR\")\n shutil.copy(contcar, poscar)\n\n if self.settings_override is not None:\n VaspModder().apply_actions(self.settings_override)\n\n def run(self):\n \"\"\"\n Perform the actual VASP run.\n\n Returns:\n (subprocess.Popen) Used for monitoring.\n \"\"\"\n cmd = list(self.vasp_cmd)\n if self.auto_gamma:\n kpts = Kpoints.from_file(\"KPOINTS\")\n if kpts.style == Kpoints.supported_modes.Gamma \\\n and tuple(kpts.kpts[0]) == (1, 1, 1):\n if self.gamma_vasp_cmd is not None and which(\n self.gamma_vasp_cmd[-1]):\n cmd = self.gamma_vasp_cmd\n elif which(cmd[-1] + \".gamma\"):\n cmd[-1] += \".gamma\"\n logger.info(\"Running {}\".format(\" \".join(cmd)))\n with open(self.output_file, 'w') as f_std, \\\n open(self.stderr_file, \"w\", buffering=1) as f_err:\n\n # Use line buffering for stderr\n p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)\n return p\n\n def postprocess(self):\n \"\"\"\n Postprocessing includes renaming and gzipping where necessary.\n \"\"\"\n # Add suffix to all sub_dir/{items}\n for path in self.neb_dirs:\n for f in VASP_NEB_OUTPUT_SUB_FILES:\n f = os.path.join(path, f)\n if os.path.exists(f):\n if self.final and self.suffix != \"\":\n shutil.move(f, \"{}{}\".format(f, self.suffix))\n elif self.suffix != \"\":\n shutil.copy(f, \"{}{}\".format(f, self.suffix))\n\n # Add suffix to all output files\n for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:\n if os.path.exists(f):\n if self.final and self.suffix != \"\":\n shutil.move(f, \"{}{}\".format(f, self.suffix))\n elif self.suffix != \"\":\n shutil.copy(f, \"{}{}\".format(f, self.suffix))\n\n\nclass GenerateVaspInputJob(Job):\n\n def __init__(self, input_set, contcar_only=True, **kwargs):\n \"\"\"\n Generates a VASP input based on an existing directory. This is typically\n used to modify the VASP input files before the next VaspJob.\n\n Args:\n input_set (str): Full path to the input set. E.g.,\n \"pymatgen.io.vasp.sets.MPNonSCFSet\".\n contcar_only (bool): If True (default), only CONTCAR structures\n are used as input to the input set.\n \"\"\"\n self.input_set = input_set\n self.contcar_only = contcar_only\n self.kwargs = kwargs\n\n def setup(self):\n pass\n\n def run(self):\n if os.path.exists(\"CONTCAR\"):\n structure = Structure.from_file(\"CONTCAR\")\n elif (not self.contcar_only) and os.path.exists(\"POSCAR\"):\n structure = Structure.from_file(\"POSCAR\")\n else:\n raise RuntimeError(\"No CONTCAR/POSCAR detected to generate input!\")\n modname, classname = self.input_set.rsplit(\".\", 1)\n mod = __import__(modname, globals(), locals(), [classname], 0)\n vis = getattr(mod, classname)(structure, **self.kwargs)\n vis.write_input(\".\")\n\n def postprocess(self):\n pass\n" ]
[ [ "numpy.polyfit", "numpy.poly1d", "numpy.linalg.norm", "scipy.optimize.minimize", "numpy.array" ] ]
seung-lab/seuron
[ "81e462f8ef71f2e28b4c2ad4c835b27b251ae25a" ]
[ "dags/segmentation_dags.py" ]
[ "from airflow import DAG\n\nfrom airflow.operators.python_operator import PythonOperator\n\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator\nfrom airflow.utils.weight_rule import WeightRule\nfrom airflow.models import Variable\n\nfrom chunkiterator import ChunkIterator\n\nfrom slack_message import slack_message, task_start_alert, task_done_alert, task_retry_alert\nfrom segmentation_op import composite_chunks_batch_op, composite_chunks_overlap_op, composite_chunks_wrap_op, remap_chunks_batch_op\nfrom helper_ops import slack_message_op, scale_up_cluster_op, scale_down_cluster_op, wait_op, mark_done_op, reset_flags_op, reset_cluster_op, placeholder_op\n\nfrom param_default import param_default, default_args, CLUSTER_1_CONN_ID, CLUSTER_2_CONN_ID\nfrom igneous_and_cloudvolume import create_info, downsample_and_mesh, get_files_job, get_atomic_files_job, dataset_resolution\nfrom igneous_ops import create_igneous_ops\nimport numpy as np\nimport json\nimport urllib\nfrom collections import OrderedDict\n\ndef generate_ng_payload(param):\n ng_resolution = dataset_resolution(param[\"SEG_PATH\"])\n seg_resolution = ng_resolution\n layers = OrderedDict()\n if \"IMAGE_PATH\" in param:\n layers[\"img\"] = {\n \"source\": \"precomputed://\"+param[\"IMAGE_PATH\"],\n \"type\": \"image\"\n }\n if \"IMAGE_SHADER\" in param:\n layers[\"img\"][\"shader\"] = param[\"IMAGE_SHADER\"]\n\n ng_resolution = dataset_resolution(param[\"IMAGE_PATH\"])\n\n layers[\"aff\"] = {\n \"source\": \"precomputed://\"+param[\"AFF_PATH\"],\n \"shader\": param.get(\"AFF_SHADER\", \"void main() {\\n float r = toNormalized(getDataValue(0));\\n float g = toNormalized(getDataValue(1));\\n float b = toNormalized(getDataValue(2)); \\n emitRGB(vec3(r,g,b));\\n}\"),\n \"type\": \"image\",\n \"visible\": False\n }\n\n if \"SEM_PATH\" in param:\n layers[\"sem\"] = {\n \"source\": \"precomputed://\"+param[\"SEM_PATH\"],\n \"type\": \"segmentation\",\n \"visible\": False\n }\n\n layers[\"ws\"] = {\n \"source\": \"precomputed://\"+param[\"WS_PATH\"],\n \"type\": \"segmentation\",\n \"visible\": False\n }\n\n layers[\"seg\"] = {\n \"source\": \"precomputed://\"+param[\"SEG_PATH\"],\n \"type\": \"segmentation\"\n }\n\n if \"GT_PATH\" in param:\n layers[\"gt\"] = {\n \"source\": \"precomputed://\"+param[\"GT_PATH\"],\n \"type\": \"segmentation\"\n }\n\n layers[\"size\"] = {\n \"source\": \"precomputed://\"+param[\"SEG_PATH\"]+\"/size_map\",\n \"type\": \"image\"\n }\n\n bbox = param[\"BBOX\"]\n\n scale = [seg_resolution[i]/ng_resolution[i] for i in range(3)]\n center = [(bbox[i]+bbox[i+3])/2*scale[i] for i in range(3)]\n\n navigation = {\n \"pose\": {\n \"position\": {\n \"voxelSize\": ng_resolution,\n \"voxelCoordinates\": center\n }\n },\n \"zoomFactor\": 4\n }\n\n payload = OrderedDict([(\"layers\", layers),(\"navigation\", navigation),(\"showSlices\", False),(\"layout\", \"xy-3d\")])\n return payload\n\ndef generate_link(param, broadcast, **kwargs):\n ng_host = param.get(\"NG_HOST\", \"https://neuromancer-seung-import.appspot.com\")\n payload = generate_ng_payload(param)\n\n if not param.get(\"SKIP_AGG\", False):\n seglist = Variable.get(\"topsegs\")\n payload[\"layers\"][\"seg\"][\"hiddenSegments\"] = seglist.split(' ')\n\n url = \"<{host}/#!{payload}|*view the results in neuroglancer*>\".format(\n host=ng_host,\n payload=urllib.parse.quote(json.dumps(payload)))\n slack_message(url, broadcast=broadcast)\n\n\ndag_manager = DAG(\"segmentation\", default_args=default_args, schedule_interval=None)\n\ndag = dict()\n\ndag[\"ws\"] 
= DAG(\"watershed\", default_args=default_args, schedule_interval=None)\n\ndag[\"agg\"] = DAG(\"agglomeration\", default_args=default_args, schedule_interval=None)\n\ndag[\"cs\"] = DAG(\"contact_surface\", default_args=default_args, schedule_interval=None)\n\ndag_ws = dag[\"ws\"]\ndag_agg = dag[\"agg\"]\ndag_cs = dag[\"cs\"]\n\nVariable.setdefault(\"param\", param_default, deserialize_json=True)\nparam = Variable.get(\"param\", deserialize_json=True)\nimage = param[\"WORKER_IMAGE\"]\n\nfor p in [\"SCRATCH\", \"WS\", \"SEG\"]:\n path = \"{}_PATH\".format(p)\n if path not in param:\n param[path] = param[\"{}_PREFIX\".format(p)]+param[\"NAME\"]\n\n\ndef confirm_dag_run(context, dag_run_obj):\n skip_flag = context['params']['skip_flag']\n op = context['params']['op']\n if param.get(skip_flag, False):\n slack_message(\":exclamation: Skip {op}\".format(op=op))\n else:\n return dag_run_obj\n\n\ndef process_composite_tasks(c, cm, top_mip, params):\n local_batch_mip = batch_mip\n if top_mip < batch_mip:\n local_batch_mip = top_mip\n\n if c.mip_level() < local_batch_mip:\n return\n\n short_queue = \"atomic\"\n long_queue = \"composite\"\n\n composite_queue = short_queue if c.mip_level() < high_mip else long_queue+\"_\"+str(c.mip_level())\n\n top_tag = str(top_mip)+\"_0_0_0\"\n tag = str(c.mip_level()) + \"_\" + \"_\".join([str(i) for i in c.coordinate()])\n for stage, op in [(\"ws\", \"ws\"), (\"agg\", \"me\"), (\"cs\", \"cs\")]:\n if c.mip_level() > local_batch_mip:\n generate_chunks[stage][c.mip_level()][tag]=composite_chunks_wrap_op(image, dag[stage], cm, composite_queue, tag, stage, op, params)\n slack_ops[stage][c.mip_level()].set_upstream(generate_chunks[stage][c.mip_level()][tag])\n elif c.mip_level() == local_batch_mip:\n generate_chunks[stage][\"batch\"][tag]=composite_chunks_batch_op(image, dag[stage], cm, short_queue, local_batch_mip-1, tag, stage, op, params)\n generate_chunks[stage][c.mip_level()][tag]=composite_chunks_wrap_op(image, dag[stage], cm, composite_queue, tag, stage, op, params)\n generate_chunks[stage][\"batch\"][tag] >> generate_chunks[stage][c.mip_level()][tag]\n slack_ops[stage][c.mip_level()].set_upstream(generate_chunks[stage][c.mip_level()][tag])\n if stage != \"cs\":\n remap_chunks[stage][tag]=remap_chunks_batch_op(image, dag[stage], cm, short_queue, local_batch_mip, tag, stage, op, params)\n slack_ops[stage][\"remap\"].set_upstream(remap_chunks[stage][tag])\n generate_chunks[stage][top_mip][top_tag].set_downstream(remap_chunks[stage][tag])\n init[stage].set_downstream(generate_chunks[stage][\"batch\"][tag])\n if params.get('OVERLAP_MODE', False) and c.mip_level() == overlap_mip and stage == 'agg':\n overlap_chunks[tag] = composite_chunks_overlap_op(image, dag[stage], cm, composite_queue, tag, params)\n for n in c.neighbours():\n n_tag = str(n.mip_level()) + \"_\" + \"_\".join([str(i) for i in n.coordinate()])\n if n_tag in generate_chunks[stage][c.mip_level()]:\n overlap_chunks[tag].set_upstream(generate_chunks[stage][n.mip_level()][n_tag])\n if n_tag != tag:\n overlap_chunks[n_tag].set_upstream(generate_chunks[stage][c.mip_level()][tag])\n #slack_ops[stage][c.mip_level()].set_downstream(overlap_chunks[tag])\n slack_ops[stage]['overlap'].set_upstream(overlap_chunks[tag])\n\n if c.mip_level() < top_mip:\n parent_coord = [i//2 for i in c.coordinate()]\n parent_tag = str(c.mip_level()+1) + \"_\" + \"_\".join([str(i) for i in parent_coord])\n for stage in [\"ws\", \"agg\", \"cs\"]:\n if params.get(\"OVERLAP_MODE\", False) and c.mip_level() == overlap_mip and stage == 
\"agg\":\n overlap_chunks[tag].set_downstream(generate_chunks[stage][c.mip_level()+1][parent_tag])\n else:\n generate_chunks[stage][c.mip_level()][tag].set_downstream(generate_chunks[stage][c.mip_level()+1][parent_tag])\n\n\ndef generate_batches(param):\n v = ChunkIterator(param[\"BBOX\"], param[\"CHUNK_SIZE\"])\n top_mip = v.top_mip_level()\n batch_mip = 3\n batch_chunks = []\n high_mip_chunks = []\n current_mip = top_mip\n mip_tasks = 0\n if top_mip > batch_mip:\n for c in v:\n if c.mip_level() > batch_mip:\n if current_mip != c.mip_level():\n if current_mip > batch_mip and mip_tasks > 50:\n batch_mip = c.mip_level()\n current_mip = c.mip_level()\n mip_tasks = 1\n else:\n mip_tasks += 1\n high_mip_chunks.append(c)\n elif c.mip_level() < batch_mip:\n break\n elif c.mip_level() == batch_mip:\n batch_chunks.append(ChunkIterator(param[\"BBOX\"], param[\"CHUNK_SIZE\"], start_from = [batch_mip]+c.coordinate()))\n else:\n batch_chunks=[v]\n\n return high_mip_chunks, batch_chunks\n\n\ndef get_atomic_files(param, prefix):\n from joblib import Parallel, delayed\n\n high_mip_chunks, batch_chunks = generate_batches(param)\n contents = Parallel(n_jobs=-2)(delayed(get_atomic_files_job)(sv, param, prefix) for sv in batch_chunks)\n\n content = b''\n for c in contents:\n content+=c\n\n return content\n\ndef compare_segmentation(param, **kwargs):\n from io import BytesIO\n import os\n from collections import defaultdict\n from evaluate_segmentation import read_chunk, evaluate_rand, evaluate_voi, find_large_diff\n from igneous_and_cloudvolume import upload_json\n from airflow import configuration as conf\n prefix = \"agg/evaluation/evaluation\"\n content = get_atomic_files(param, prefix)\n f = BytesIO(content)\n s_i = defaultdict(int)\n t_j = defaultdict(int)\n p_ij = defaultdict(lambda: defaultdict(int))\n payload = generate_ng_payload(param)\n payload['layers']['size']['visible'] = False\n while True:\n if not read_chunk(f, s_i, t_j, p_ij):\n break\n rand_split, rand_merge = evaluate_rand(s_i, t_j, p_ij)\n voi_split, voi_merge = evaluate_voi(s_i, t_j, p_ij)\n seg_pairs = find_large_diff(s_i, t_j, p_ij, payload)\n scores = {\n 'rand_split': rand_split,\n 'rand_merge': rand_merge,\n 'voi_split': voi_split,\n 'voi_merge': voi_merge,\n }\n Variable.set(\"seg_eval\", scores, serialize_json=True)\n output = {\n \"ng_payload\": payload,\n \"seg_pairs\": seg_pairs\n }\n gs_log_path = conf.get('core', 'remote_log_folder')\n bucket_name = gs_log_path[5:].split('/')[0]\n\n upload_json(\"gs://\"+os.path.join(bucket_name,\"diff\"), \"{}.json\".format(param[\"NAME\"]), output)\n\n\ndef evaluate_results(param, **kwargs):\n if \"GT_PATH\" not in param:\n return\n\n diff_server = param.get(\"DIFF_SERVER\", \"https://diff-dot-neuromancer-seung-import.appspot.com\")\n\n scores = Variable.get(\"seg_eval\", deserialize_json=True)\n\n msg = '''*Evaluation against ground truth* `{gt_path}`:\nrand split: *{rand_split}*\nrand merge: *{rand_merge}*\nvoi split : *{voi_split}*\nvoi merge : *{voi_merge}*\nseg diff: {url}\n'''.format(\n gt_path=param[\"GT_PATH\"],\n rand_split=round(abs(scores['rand_split']),3),\n rand_merge=round(abs(scores['rand_merge']),3),\n voi_split=round(abs(scores['voi_split']),3),\n voi_merge=round(abs(scores['voi_merge']),3),\n url=\"{}/{}\".format(diff_server, param[\"NAME\"])\n )\n slack_message(msg, broadcast=True)\n\n\ndef plot_histogram(data, title, xlabel, ylabel, fn):\n import math\n import matplotlib.pyplot as plt\n plt.clf()\n max_bin = math.ceil(math.log10(max(data)))\n plt.hist(data, 
bins=np.logspace(0, max_bin, max_bin+1))\n plt.xscale('log')\n plt.yscale('log')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.savefig(fn)\n\n\ndef contact_surfaces(param):\n prefix = \"cs/cs/complete_cs\"\n content = get_files(param, prefix)\n cs_type = [('s1', np.uint64), ('s2', np.uint64), ('sumx', np.uint64), ('sumy', np.uint64),('sumz', np.uint64), ('size', np.uint64), ('sizex', np.uint64), ('sizey', np.uint64),('sizez', np.uint64),\n ('minx', np.int64),('miny', np.int64),('minz', np.int64),('maxx', np.int64),('maxy', np.int64),('maxz', np.int64)]\n\n data = np.frombuffer(content, dtype=cs_type)\n\n title = \"Distribution of the contact sizes\"\n xlabel = \"Number of voxels in the contact surface\"\n ylabel = \"Number of contact surfaces\"\n plot_histogram(data['size'], title, xlabel, ylabel, '/tmp/hist.png')\n\n order = np.argsort(data['size'])[::-1]\n\n msg = '''*Finished extracting contact surfaces in* `{seg_path}`:\nnumber of contact surfaces: *{size_cs}*\naverage contact area: *{mean_cs}*\nmaximal contact area: *{max_cs}*\noutput location: `{url}`\n'''.format(\n seg_path=param[\"SEG_PATH\"],\n size_cs=len(data),\n mean_cs=int(np.mean(data['size'])),\n max_cs=data[order[0]]['size'],\n url=\"{}/{}\".format(param[\"SCRATCH_PATH\"], \"cs/cs\")\n )\n slack_message(msg, attachment='/tmp/hist.png')\n\n xlabel = \"Number of boundary voxels in the x direction\"\n plot_histogram(data['sizex'], title, xlabel, ylabel, '/tmp/histx.png')\n slack_message(\"\", attachment='/tmp/histx.png')\n xlabel = \"Number of boundary voxels in the y direction\"\n plot_histogram(data['sizey'], title, xlabel, ylabel, '/tmp/histy.png')\n slack_message(\"\", attachment='/tmp/histy.png')\n xlabel = \"Number of boundary voxels in the z direction\"\n plot_histogram(data['sizez'], title, xlabel, ylabel, '/tmp/histz.png')\n slack_message(\"\", attachment='/tmp/histz.png')\n\n\ndef get_files(param, prefix):\n from joblib import Parallel, delayed\n\n high_mip_chunks, batch_chunks = generate_batches(param)\n print(\"get {} high mip files\".format(len(high_mip_chunks)))\n content = get_files_job(high_mip_chunks, param, prefix)\n print(\"get lower mip files\")\n contents = Parallel(n_jobs=-2)(delayed(get_files_job)(sv, param, prefix) for sv in batch_chunks)\n\n for c in contents:\n content+=c\n\n return content\n\n\ndef process_infos(param, **kwargs):\n dt_count = np.dtype([('segid', np.uint64), ('count', np.uint64)])\n prefix = \"agg/info/info\"\n content = get_files(param, prefix)\n data = np.frombuffer(content, dtype=dt_count)\n title = \"Distribution of the segment sizes\"\n xlabel = \"Number of supervoxels in the segments\"\n ylabel = \"Number of segments\"\n plot_histogram(data['count'], title, xlabel, ylabel, '/tmp/hist.png')\n order = np.argsort(data['count'])[::-1]\n ntops = min(20,len(data))\n msg = '''*Agglomeration Finished*\n*{nseg}* segments (*{nsv}* supervoxels)\n\nLargest segments:\n{top20list}'''.format(\n nseg=len(data),\n nsv=np.sum(data['count']),\n top20list=\"\\n\".join(\"id: {} ({})\".format(data[order[i]][0], data[order[i]][1]) for i in range(ntops))\n )\n slack_message(msg, attachment='/tmp/hist.png')\n Variable.set(\"topsegs\", \" \".join(str(int(data[order[i]][0])) for i in range(ntops)))\n\n\nif \"BBOX\" in param and \"CHUNK_SIZE\" in param: #and \"AFF_MIP\" in param:\n data_bbox = param[\"BBOX\"]\n\n chunk_size = param[\"CHUNK_SIZE\"]\n\n\n #data_bbox = [126280+256, 64280+256, 20826-200, 148720-256, 148720-256, 20993]\n starting_msg ='''*Start Segmenting {name}*\n Affinity 
map: `{aff}`\n Affinity mip level: {mip}\n Bounding box: [{bbox}]'''.format(\n name = param[\"NAME\"],\n aff = param[\"AFF_PATH\"],\n bbox = \", \".join(str(x) for x in param[\"BBOX\"]),\n mip = param.get(\"AFF_MIP\",0)\n )\n\n ending_msg = '''*Finish Segmenting {name}*\n Watershed layer: `{ws}`\n Segmentation Layer: `{seg}`'''.format(\n name = param[\"NAME\"],\n ws = param[\"WS_PATH\"],\n seg = param[\"SEG_PATH\"]\n )\n\n no_rescale_msg = \":exclamation: Cannot rescale cluster\"\n rescale_message = \":heavy_check_mark: Rescaled cluster {} to {} instances\"\n\n starting_op = slack_message_op(dag_manager, \"start\", starting_msg)\n ending_op = slack_message_op(dag_manager, \"end\", ending_msg)\n\n reset_flags = reset_flags_op(dag_manager, param)\n\n init = dict()\n\n init[\"ws\"] = PythonOperator(\n task_id = \"Init_Watershed\",\n python_callable=create_info,\n op_args = [\"ws\", param],\n default_args=default_args,\n on_success_callback=task_start_alert,\n on_retry_callback=task_retry_alert,\n weight_rule=WeightRule.ABSOLUTE,\n dag=dag[\"ws\"],\n queue = \"manager\"\n )\n\n init[\"agg\"] = PythonOperator(\n task_id = \"Init_Agglomeration\",\n python_callable=create_info,\n op_args = [\"agg\", param],\n default_args=default_args,\n on_success_callback=task_start_alert,\n on_retry_callback=task_retry_alert,\n weight_rule=WeightRule.ABSOLUTE,\n dag=dag[\"agg\"],\n queue = \"manager\"\n )\n\n init['cs'] = slack_message_op(dag['cs'], \"init_cs\", \":arrow_forward: Start extracting contact surfaces\")\n\n generate_chunks = {\n \"ws\": {},\n \"agg\": {},\n \"cs\": {}\n }\n\n overlap_chunks = {}\n\n remap_chunks = {\n \"ws\": {},\n \"agg\": {}\n }\n\n slack_ops = {\n \"ws\": {},\n \"agg\": {},\n \"cs\": {}\n }\n\n scaling_ops = {\n \"ws\": {},\n \"agg\": {},\n \"cs\": {}\n }\n\n triggers = dict()\n wait = dict()\n mark_done = dict()\n\n triggers[\"ws\"] = TriggerDagRunOperator(\n task_id=\"trigger_ws\",\n trigger_dag_id=\"watershed\",\n python_callable=confirm_dag_run,\n params={'skip_flag': \"SKIP_WS\",\n 'op': \"watershed\"},\n queue=\"manager\",\n dag=dag_manager\n )\n\n wait[\"ws\"] = wait_op(dag_manager, \"ws_done\")\n\n mark_done[\"ws\"] = mark_done_op(dag[\"ws\"], \"ws_done\")\n\n triggers[\"agg\"] = TriggerDagRunOperator(\n task_id=\"trigger_agg\",\n trigger_dag_id=\"agglomeration\",\n python_callable=confirm_dag_run,\n params={'skip_flag': \"SKIP_AGG\",\n 'op': \"agglomeration\"},\n queue=\"manager\",\n dag=dag_manager\n )\n\n wait[\"agg\"] = wait_op(dag_manager, \"agg_done\")\n\n mark_done[\"agg\"] = mark_done_op(dag[\"agg\"], \"agg_done\")\n\n v = ChunkIterator(data_bbox, chunk_size)\n top_mip = v.top_mip_level()\n batch_mip = param.get(\"BATCH_MIP\", 3)\n high_mip = param.get(\"HIGH_MIP\", 5)\n if param.get(\"OVERLAP_MODE\", False):\n overlap_mip = param.get(\"OVERLAP_MIP\", batch_mip)\n local_batch_mip = batch_mip\n aux_queue = \"atomic\" if top_mip < high_mip else \"composite_\"+str(top_mip)\n\n\n check_seg = PythonOperator(\n task_id=\"Check_Segmentation\",\n python_callable=process_infos,\n provide_context=True,\n op_args=[param],\n default_args=default_args,\n on_success_callback=task_done_alert,\n on_retry_callback=task_retry_alert,\n dag=dag_agg,\n queue=aux_queue\n )\n\n aux_agg_tasks = [check_seg]\n\n summary_cs = PythonOperator(\n task_id=\"CS_Summary\",\n python_callable=contact_surfaces,\n op_args=[param],\n on_success_callback=task_done_alert,\n on_retry_callback=task_retry_alert,\n default_args=default_args,\n dag=dag_cs,\n queue=aux_queue\n )\n\n if \"GT_PATH\" in 
param:\n comp_seg_task = PythonOperator(\n task_id = \"Compare_Segmentation\",\n python_callable=compare_segmentation,\n provide_context=True,\n op_args=[param,],\n default_args=default_args,\n dag=dag_agg,\n on_success_callback=task_done_alert,\n on_retry_callback=task_retry_alert,\n queue=aux_queue\n )\n aux_agg_tasks.append(comp_seg_task)\n\n cm = [\"param\"]\n if \"MOUNT_SECRETES\" in param:\n cm += param[\"MOUNT_SECRETES\"]\n\n if top_mip < batch_mip:\n local_batch_mip = top_mip\n\n if top_mip == batch_mip:\n param[\"OVERLAP_MODE\"] = False\n\n if param.get(\"OVERLAP_MODE\", False):\n slack_ops['agg']['overlap'] = slack_message_op(dag['agg'], \"overlap_\"+str(overlap_mip), \":heavy_check_mark: {} MIP {} finished\".format(\"overlapped agglomeration at\", overlap_mip))\n\n for c in v:\n if c.mip_level() < local_batch_mip:\n break\n else:\n for k in [\"ws\",\"agg\",\"cs\"]:\n generate_chunks[k][\"batch\"] = {}\n if c.mip_level() not in generate_chunks[k]:\n generate_chunks[k][c.mip_level()] = {}\n\n if c.mip_level() not in slack_ops[k]:\n slack_ops[k][c.mip_level()] = slack_message_op(dag[k], k+str(c.mip_level()), \":heavy_check_mark: {}: MIP {} finished\".format(k, c.mip_level()))\n if c.mip_level() == local_batch_mip and k != \"cs\":\n slack_ops[k][\"remap\"] = slack_message_op(dag[k], \"remap_{}\".format(k), \":heavy_check_mark: {}: Remaping finished\".format(k))\n slack_ops[k][\"remap\"] >> mark_done[k]\n process_composite_tasks(c, cm, top_mip, param)\n\n cluster1_size = len(remap_chunks[\"ws\"])\n\n\n if cluster1_size >= 100:\n reset_cluster_after_ws = reset_cluster_op(dag['ws'], \"ws\", CLUSTER_1_CONN_ID, 20)\n slack_ops['ws']['remap'] >> reset_cluster_after_ws\n\n\n scaling_global_start = scale_up_cluster_op(dag_manager, \"global_start\", CLUSTER_1_CONN_ID, 20, cluster1_size, \"manager\")\n\n scaling_global_finish = scale_down_cluster_op(dag_manager, \"global_finish\", CLUSTER_1_CONN_ID, 0, \"manager\")\n\n scaling_cs_start = scale_up_cluster_op(dag_cs, \"cs_start\", CLUSTER_1_CONN_ID, 20, cluster1_size, \"manager\")\n\n scaling_cs_finish = scale_down_cluster_op(dag_cs, \"cs_finish\", CLUSTER_1_CONN_ID, 0, \"manager\")\n\n scaling_cs_start >> init['cs']\n slack_ops['cs'][top_mip] >> summary_cs >> scaling_cs_finish\n slack_ops['agg'][top_mip] >> check_seg >> mark_done['agg']\n if \"GT_PATH\" in param:\n slack_ops['agg']['remap'] >> comp_seg_task >> mark_done['agg']\n\n\n igneous_tasks = create_igneous_ops(param, dag_manager)\n\n scaling_igneous_finish = scale_down_cluster_op(dag_manager, \"igneous_finish\", \"igneous\", 0, \"manager\")\n\n starting_op >> reset_flags >> triggers[\"ws\"] >> wait[\"ws\"] >> triggers[\"agg\"] >> wait[\"agg\"] >> igneous_tasks[0]\n igneous_tasks[-1] >> ending_op\n reset_flags >> scaling_global_start\n igneous_tasks[-1]>> scaling_igneous_finish\n wait[\"agg\"] >> scaling_global_finish\n\n nglink_task = PythonOperator(\n task_id = \"Generate_neuroglancer_link\",\n provide_context=True,\n python_callable=generate_link,\n op_args = [param, True],\n default_args=default_args,\n dag=dag_manager,\n queue = \"manager\"\n )\n igneous_tasks[-1] >> nglink_task >> ending_op\n if \"GT_PATH\" in param:\n evaluation_task = PythonOperator(\n task_id = \"Evaluate_Segmentation\",\n provide_context=True,\n python_callable=evaluate_results,\n op_args = [param,],\n default_args=default_args,\n dag=dag_manager,\n queue = \"manager\"\n )\n igneous_tasks[-1] >> evaluation_task >> ending_op\n\n\n if min(high_mip, top_mip) - batch_mip > 2:\n for stage in [\"ws\", \"agg\", 
\"cs\"]:\n dsize = len(generate_chunks[stage][batch_mip+2])*2\n scaling_ops[stage][\"extra_down\"] = scale_down_cluster_op(dag[stage], stage, CLUSTER_1_CONN_ID, dsize, \"manager\")\n scaling_ops[stage][\"extra_down\"].set_upstream(slack_ops[stage][batch_mip+1])\n\n if top_mip >= high_mip:\n for stage in [\"ws\", \"agg\", \"cs\"]:\n scaling_ops[stage][\"down\"] = scale_down_cluster_op(dag[stage], stage, CLUSTER_1_CONN_ID, 0, \"manager\")\n scaling_ops[stage][\"down\"].set_upstream(slack_ops[stage][high_mip-1])\n\n\n cluster2_size = max(1, len(generate_chunks[stage][high_mip])//8)\n scaling_ops[stage][\"up_long\"] = scale_up_cluster_op(dag[stage], stage+\"_long\", CLUSTER_2_CONN_ID, 2, cluster2_size, \"manager\")\n\n for k in generate_chunks[stage][high_mip-1]:\n scaling_ops[stage][\"up_long\"].set_upstream(generate_chunks[stage][high_mip-1][k])\n\n scaling_ops[stage][\"down_long\"] = scale_down_cluster_op(dag[stage], stage+\"_long\", CLUSTER_2_CONN_ID, 0, \"manager\")\n if stage == \"cs\":\n scaling_ops[stage][\"down_long\"].set_upstream(summary_cs)\n elif stage == \"agg\":\n aux_agg_tasks >> scaling_ops[stage][\"down_long\"]\n else:\n scaling_ops[stage][\"down_long\"].set_upstream(slack_ops[stage][top_mip])\n\n if min(high_mip, top_mip) - batch_mip > 2 or top_mip >= high_mip:\n for stage in [\"ws\", \"agg\"]:\n scaling_ops[stage][\"up\"] = scale_up_cluster_op(dag[stage], stage, CLUSTER_1_CONN_ID, 20, cluster1_size, \"manager\")\n scaling_ops[stage][\"up\"].set_upstream(slack_ops[stage][top_mip])\n\n" ]
[ [ "matplotlib.pyplot.title", "numpy.logspace", "matplotlib.pyplot.yscale", "matplotlib.pyplot.savefig", "numpy.dtype", "numpy.frombuffer", "matplotlib.pyplot.clf", "numpy.mean", "matplotlib.pyplot.xlabel", "numpy.argsort", "matplotlib.pyplot.xscale", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
spatialucr/MapLinksPlot
[ "471dbc328858ad0bafd78d5b4b495379f8e69de9" ]
[ "PYTHON_Quantitative_Data_VIZ/Adaptive_Choropleth_Mapper.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\nimport json, math, copy\nfrom geosnap.io import store_ltdb\nfrom geosnap import Community, datasets\nfrom geosnap.io import store_census\nimport pandas as pd\nimport shapely.wkt\nimport shapely.geometry\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pathlib import Path\nimport urllib.parse\nimport webbrowser\nimport os\nimport pprint\nimport numpy as np\nfrom notebook import notebookapp\nfrom IPython.core.display import display, HTML\nimport geopandas as gpd\n\ndef write_LOG(param):\n #Create a new folder where GEO_CONFIG.js GEO_JSON.js VARIABLES.js will be saved\n oDir = 'ACM_' + param['filename_suffix']\n path = Path(oDir + '/data')\n path.mkdir(parents=True, exist_ok=True)\n \n contents = pprint.pformat(param)\n #print(oDir+\"/data/param.log\")\n #print(contents)\n #write new outfiles: GEO_CONFIG.js GEO_JSON.js VARIABLES.js\n ofile = open(oDir+\"/data/param.log\", \"w\", encoding=\"utf-8\")\n create_at = datetime.now()\n ofile.write('%s %s\\r\\n' % (create_at.strftime('%Y-%m-%d'), create_at.strftime('%H:%M:%S')))\n #ofile.write('\\r\\n\\r\\n')\n ofile.write(' '+contents.replace('\\n', '\\n '))\n ofile.close()\n\ndef write_INDEX_html(param):\n \n #Create a new folder where CONFIG.js GEO_JSON.js VARIABLES.js will be saved\n oDir = 'ACM_' + param['filename_suffix']\n path = Path(oDir + '/data')\n path.mkdir(parents=True, exist_ok=True)\n \n contents = []\n #open Adaptive_Choropleth_Mapper.html (the excutable file for the visualization)\n ifile = open(\"template/Adaptive_Choropleth_Mapper.html\", \"r\", encoding=\"utf-8\")\n contents = ifile.read()\n \n #Replace variables based on the user's selection in each of four files below.\n contents = contents.replace(\"Adaptive Choropleth Mapper\", param['title'])\n contents = contents.replace(\"data/CONFIG.js\", \"data/CONFIG_\"+param['filename_suffix']+\".js\")\n contents = contents.replace(\"data/GEO_JSON.js\", \"data/GEO_JSON_\"+param['filename_suffix']+\".js\")\n contents = contents.replace(\"data/VARIABLES.js\", \"data/VARIABLES_\"+param['filename_suffix']+\".js\")\n \n #write new outfiles: CONFIG.js GEO_JSON.js VARIABLES.js\n ofile = open(oDir+\"/index.html\", \"w\")\n ofile.write(contents)\n ofile.close()\n\n\ndef write_CONFIG_js(param):\n # read CONFIG.js\n ifile = open(\"template/CONFIG.js\", \"r\", encoding=\"utf-8\")\n contents = ifile.read()\n \n # Automatically identify variables for \"NumOfMaps\" and \"InitialLayers\"\n '''when the user selects more than one year among 1970, 1980, 1990, 200 and 2010, \"NumOfMaps\" will be equal to the number of the selected years. However, when the user selects only one year among 5 years, \"NumOfMaps\" will be the number of variables that the user selected. (The maximum number of maps that can be visualized is 15) In this case, when the user selects more than 15 variables, the first 15 maps will be created at the initial view, and the rest of variables will be available in the dropdown box of the top of each map. 
In brief, there is no limit in terms of variables that the user can visualize, but the user can visualize upto 15 maps at the same time.'''\n NumOfMaps = len(param['years'])\n chart = param['chart'] if 'chart' in param else ''\n if (chart == \"Scatter Plot\"): NumOfMaps = 2\n InitialLayers = []\n if (NumOfMaps > 1):\n for i, year in enumerate(param['years']):\n InitialLayers.append(str(year)+' '+param['labels'][0])\n else:\n NumOfMaps = len(param['labels'])\n if ('NumOfMaps' in param): NumOfMaps = param['NumOfMaps']\n if (NumOfMaps > 15): NumOfMaps = 15\n for i, variable in enumerate(param['labels']):\n InitialLayers.append(str(param['years'][0])+' '+variable)\n \n # Automatically set Map_width, Map_height. \n Map_width = \"360px\"\n Map_height = \"360px\"\n if (NumOfMaps <= 4):\n Map_width = \"400px\"\n Map_height = \"400px\"\n if (NumOfMaps <= 3):\n Map_width = \"450px\"\n Map_height = \"450px\"\n if (NumOfMaps <= 2):\n Map_width = \"500px\"\n Map_height = \"500px\"\n if (NumOfMaps <= 1):\n Map_width = \"700px\"\n Map_height = \"700px\"\n \n # replace newly computed \"NumOfMaps\", \"InitialLayers\", \"Map_width\", \"Map_height\" in CONFIG.js. See the example replacement below\n '''\n NumOfMaps : 4 -> 'var NumOfMaps = 4;'\n InitialLayers : [ … ] -> 'var InitialLayers = [\"1980 p_nonhisp_white_persons\", \"1980 p_nonhisp_black_persons\", \"1980 p_hispanic_persons\", … ];'\n Map_width : \"400px\" -> 'var Map_width = \"400px\";'\n Map_height : \"400px\" -> 'var Map_height = \"400px\";'\n '''\n NumOfMaps = \"var NumOfMaps = \" + str(NumOfMaps) + \";\"\n InitialLayers = \"var InitialLayers = \" + json.dumps(InitialLayers) + \";\"\n Map_width = 'var Map_width = \"' + Map_width + '\";'\n Map_height = 'var Map_height = \"' + Map_height + '\";'\n \n contents = contents.replace(\"var NumOfMaps = 1;\", NumOfMaps)\n contents = contents.replace(\"var InitialLayers = [];\", InitialLayers)\n contents = contents.replace('var Map_width = \"400px\";', Map_width)\n contents = contents.replace('var Map_height = \"400px\";', Map_height)\n \n chart = param['chart'] if 'chart' in param else ''\n #print('chart: ' + chart )\n #print(chart == \"Stacked Chart\")\n \n Stacked_Chart = \"var Stacked_Chart = false;\"\n Correlogram = \"var Correlogram = false;\"\n Scatter_Plot = \"var Scatter_Plot = false;\"\n Parallel_Coordinates_Plot = \"var Parallel_Coordinates_Plot = false;\"\n \n if (chart == \"Stacked Chart\"): Stacked_Chart = \"var Stacked_Chart = true;\"\n elif (chart == \"Correlogram\"): Correlogram = \"var Correlogram = true;\"\n elif (chart == \"Scatter Plot\"): Scatter_Plot = \"var Scatter_Plot = true;\"\n elif (chart == \"Parallel Coordinates Plot\"): Parallel_Coordinates_Plot = \"var Parallel_Coordinates_Plot = true;\"\n else: Stacked_Chart = \"var Stacked_Chart = false;\"\n \n contents = contents.replace(\"var Stacked_Chart = false;\", Stacked_Chart)\n contents = contents.replace(\"var Correlogram = false;\", Correlogram)\n contents = contents.replace(\"var Scatter_Plot = false;\", Scatter_Plot)\n contents = contents.replace(\"var Parallel_Coordinates_Plot = false;\", Parallel_Coordinates_Plot)\n\n #Write output including the replacement above\n filename_CONFIG = \"ACM_\" + param['filename_suffix'] + \"/data/CONFIG_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_CONFIG, 'w')\n ofile.write(contents)\n ofile.close()\n\n\ndef write_GEO_JSON_js(community, param):\n # query geometry for each tract\n geoid = community.gdf.columns[0]\n tracts = community.gdf[[geoid, 'geometry']].copy()\n 
tracts.drop_duplicates(subset=geoid, inplace=True) # get unique geoid\n #print(tracts)\n \n # open GEO_JSON.js write heading for geojson format\n filename_GEO_JSON = \"ACM_\" + param['filename_suffix'] + \"/data/GEO_JSON_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_GEO_JSON, 'w')\n ofile.write('var GEO_JSON =\\n')\n ofile.write('{\"type\":\"FeatureCollection\", \"features\": [\\n')\n \n #Convert geometry in GEOJSONP to geojson format\n for tract in tracts.itertuples():\n feature = {\"type\":\"Feature\"}\n if (tract.geometry is None): # check is NaN?\n #print(tract.geometry)\n continue\n feature[\"geometry\"] = shapely.geometry.mapping(tract.geometry)\n #feature[\"properties\"] = {geoid: tract.__getattribute__(geoid), \"tractID\": tract.__getattribute__(geoid)}\n feature[\"properties\"] = {geoid: tract.__getattribute__(geoid)}\n ofile.write(json.dumps(feature)+',\\n')\n # complete the geojosn format by adding parenthesis at the end. \n ofile.write(']}\\n')\n ofile.close()\n\n\ndef write_VARIABLES_js(community, param):\n #print(param) \n geoid = community.gdf.columns[0]\n years = param['years']\n variables = param['variables']\n \n ## filtering by years\n #community.gdf = community.gdf[community.gdf.year.isin(years)]\n #print(community.gdf)\n #selectedCommunity = community.gdf[variables]\n #print(community.gdf)\n #return\n \n #make heading: community.gdf.columns[0] has \"geoid\" (string)\n heading = [geoid]\n for i, year in enumerate(years):\n for j, variable in enumerate(param['labels']):\n heading.append(str(year)+' '+variable)\n \n #Make Dictionary\n mydictionary = {} # key: geoid, value: variables by heading\n h = -1\n selectedColumns = [geoid]\n selectedColumns.extend(variables)\n #print(\"selectedColumns:\", type(selectedColumns), selectedColumns)\n for i, year in enumerate(years):\n aYearDF = community.gdf[community.gdf.year==year][selectedColumns]\n #print(year, type(aYearDF), aYearDF)\n for j, variable in enumerate(variables):\n h += 1\n for index, row in aYearDF.iterrows():\n #print(index, row)\n key = row[geoid]\n val = row[variable]\n if (math.isnan(val)): #converts Nan in GEOSNAP data to -9999\n #print(i, j, key, year, val)\n val = -9999\n if (key in mydictionary):\n value = mydictionary[key]\n value[h] = val\n else:\n value = [-9999] * (len(heading) - 1) \n value[h] = val\n mydictionary[key] = value\n \n #Select keys in the Dictionary and sort\n keys = list(mydictionary.keys())\n keys.sort()\n # use Keys and Dictionary created above and write them VARIABLES.js\n filename_VARIABLES = \"ACM_\" + param['filename_suffix'] + \"/data/VARIABLES_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_VARIABLES, 'w')\n ofile.write('var GEO_VARIABLES =\\n')\n ofile.write('[\\n')\n ofile.write(' '+json.dumps(heading)+',\\n')\n for i, key in enumerate(keys):\n values = mydictionary[key]\n values.insert(0, key)\n #print(key, values)\n ofile.write(' '+json.dumps(values)+',\\n')\n ofile.write(']\\n')\n ofile.close()\n\n\ndef Adaptive_Choropleth_Mapper_viz(param):\n \n # convert year, variable to years, variables in the param\n if ('years' not in param and 'year' in param): param['years'] = [param['year']]\n if ('years' not in param and 'year' not in param and 'periods' in param): param['years'] = param['periods']\n if ('years' not in param and 'year' not in param and 'periods' not in param and 'period' in param): param['years'] = [param['period']]\n if ('variables' not in param and 'variable' in param): param['variables'] = [param['variable']]\n #print(param['years'])\n \n 
# select community by state_fips, msa_fips, county_fips\n community = None\n if ('msa_fips' in param and param['msa_fips']):\n community = Community.from_ltdb(years=param['years'], msa_fips=param['msa_fips'])\n #community = Community.from_ltdb(msa_fips=param['msa_fips'])\n elif ('county_fips' in param and param['county_fips']):\n community = Community.from_ltdb(years=param['years'], county_fips=param['county_fips'])\n elif ('state_fips' in param and param['state_fips']):\n community = Community.from_ltdb(years=param['years'], state_fips=param['state_fips'])\n #print(community.gdf)\n\n# if the user enters CSV and shapefile, use the files from the user\n\n#### This is executed when the user enter attributes in csv file and geometroy in shapefile ###################### \n if (community is None and 'inputCSV' in param):\n community = Community()\n #community.gdf = pd.read_csv(param['inputCSV'], dtype={'geoid':str})\n community.gdf = param[\"inputCSV\"]\n #print(community.gdf)\n geoid = community.gdf.columns[0]\n #community.gdf = community.gdf.astype(str)\n #print(\"inputCSV: \" + community.gdf.geoid) \n community.gdf[community.gdf.columns[0]] = community.gdf[geoid].astype(str)\n #print(\"community.gdf.columns[0]:\", community.gdf.columns[0])\n \n # read shape file to df_shape\n #df_shape = gpd.read_file(param['shapefile'])\n df_shape = param['shapefile']\n df_shape = df_shape.astype(str) \n #print(\"shapefile: \" + df_shape.GEOID10)\n geokey = df_shape.columns[0]\n #print(geokey) \n df_shape = df_shape.set_index(geokey)\n \n # insert geometry to community.gdf\n geometry = []\n for index, row in community.gdf.iterrows():\n tractid = row[geoid]\n try:\n tract = df_shape.loc[tractid]\n geometry.append(shapely.wkt.loads(tract.geometry))\n except KeyError:\n #print(\"Tract ID [{}] is not found in the shape file {}\".format(tractid, param['shapefile']))\n geometry.append(None)\n # print( \"geometry\" in community.gdf ) \n #f hasattr(community.gdf, \"geoemtry\"):\n #if (community.gdf[\"geoemtry\"] is None):\n # pass \n #else:\n if((\"geometry\" in community.gdf) == False):\n community.gdf.insert(len(community.gdf.columns), \"geometry\", geometry)\n community.gdf.rename(columns={'period':'year'}, inplace=True)\n #print(community.gdf)\n################################################################################################################ \n \n community.gdf = community.gdf.replace([np.inf, -np.inf], np.nan)\n # check if geometry is not null for Spatial Clustering \n community.gdf = community.gdf[pd.notnull(community.gdf['geometry'])]\n #print(community.gdf)\n\n codebook = pd.read_csv('template/conversion_table_codebook.csv')\n codebook.set_index(keys='variable', inplace=True)\n labels = copy.deepcopy(param['variables'])\n label = 'short_name' # default\n if (param['label'] == 'variable'): label = 'variable'\n if (param['label'] == 'full_name'): label = 'full_name'\n if (param['label'] == 'short_name'): label = 'short_name'\n if (label != 'variable'):\n for idx, variable in enumerate(param['variables']):\n try:\n codeRec = codebook.loc[variable]\n labels[idx] = codeRec[label]\n except:\n print(\"variable not found in codebook. 
variable:\", variable)\n param['labels'] = labels\n \n write_INDEX_html(param)\n write_CONFIG_js(param)\n write_VARIABLES_js(community, param)\n write_GEO_JSON_js(community, param)\n \n '''\n #Create directory for local machine\n local_dir = os.path.dirname(os.path.realpath(__file__))\n fname =urllib.parse.quote('index.html')\n template_dir = os.path.join(local_dir, 'ACM_' + param['filename_suffix'])\n url = 'file:' + os.path.join(template_dir, fname)\n webbrowser.open(url)\n \n print('Please run ' + '\"ACM_' + param['filename_suffix']+'/index.html\"'+' to your web browser.')\n print('Advanced options are available in ' + '\"ACM_' + param['filename_suffix']+'/data/CONFIG.js\"')\n '''\n\n #Create directory for Visualization \n servers = list(notebookapp.list_running_servers())\n servers1 = 'https://cybergisx.cigi.illinois.edu'+servers[0][\"base_url\"]+ 'view'\n servers2 = 'https://cybergisx.cigi.illinois.edu'+servers[0][\"base_url\"]+ 'edit' \n cwd = os.getcwd()\n prefix_cwd = \"/home/jovyan/work\"\n cwd = cwd.replace(prefix_cwd, \"\")\n \n # This is for Jupyter notebbok installed in your PC\n local_dir1 = cwd\n local_dir2 = cwd \n \n #This is for CyberGISX. Uncomment two command lines below when you run in CyberGIX Environment\n #local_dir1 = servers1 + cwd\n #local_dir2 = servers2 + cwd \n \n \n #print(local_dir)\n fname =urllib.parse.quote('index.html')\n template_dir = os.path.join(local_dir1, 'ACM_' + param['filename_suffix'])\n #url = 'file:' + os.path.join(template_dir, fname)\n url = os.path.join(template_dir, fname) \n webbrowser.open(url)\n print('To see your visualization, click the URL below (or locate the files):')\n print(url) \n print('Advanced options are available in ') \n print(local_dir2 + '/'+ 'ACM_' + param['filename_suffix']+'/data/CONFIG_' + param['filename_suffix']+'.js') \n \n \nif __name__ == '__main__':\n started_datetime = datetime.now()\n dateYYMMDD = started_datetime.strftime('%Y%m%d')\n timeHHMMSS = started_datetime.strftime('%H%M%S')\n print('This program started at %s %s' % (started_datetime.strftime('%Y-%m-%d'), started_datetime.strftime('%H:%M:%S')))\n \n #sample = \"downloads/LTDB_Std_All_Sample.zip\"\n #full = \"downloads/LTDB_Std_All_fullcount.zip\"\n #store_ltdb(sample=sample, fullcount=full)\n #store_census()\n #geosnap.io.store_census()\n\n input_attributes = pd.read_csv(\"attributes/Chicago_1980_1990_2000_2010.csv\", dtype={'geoid':str})\n input_attributes = input_attributes.rename(columns={'geoid': 'tractID'})\n shapefile = gpd.read_file(\"shp/Cook_County_Tract.shp\")\n shapefile = shapefile.rename(columns={'GEOID10': 'tractID'})\n\n param = {\n 'title': \"Adaptive Choropleth Mapper with Scatter Plot\",\n 'filename_suffix': \"Chicago_ACM_Scatter\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile, \n 'year': 2000,\n 'label': \"short_name\", #Pick variable,short_name,full_name from template/conversion_table_codebook.csv \n 'variables': [ #enter variable names of the column you selected above.\n \"p_nonhisp_white_persons\",\n \"p_nonhisp_black_persons\",\n \"p_hispanic_persons\",\n \"p_asian_persons\",\n \"p_foreign_born_pop\",\n \"p_edu_college_greater\",\n \"p_unemployment_rate\",\n \"p_employed_manufacturing\",\n \"p_poverty_rate\",\n \"p_vacant_housing_units\",\n \"p_owner_occupied_units\",\n \"p_housing_units_multiunit_structures\",\n \"median_home_value\",\n \"p_structures_30_old\",\n \"p_household_recent_move\",\n \"p_persons_under_18\",\n \"p_persons_over_60\", \n ],\n 'chart': \"Scatter Plot\", \n }\n \n input_attributes = 
pd.read_csv(\"attributes/Chicago_1980_1990_2000_2010.csv\", dtype={'geoid':str})\n input_attributes = input_attributes.rename(columns={'geoid': 'tractID'})\n shapefile = gpd.read_file(\"shp/Cook_County_Tract.shp\")\n shapefile = shapefile.rename(columns={'GEOID10': 'tractID'})\n\n param1 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"Chicago_ACM_Correlogram\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile,\n 'period': 2010,\n 'NumOfMaps': 4, \n 'label': \"short_name\", #Pick variable,short_name,full_name from template/conversion_table_codebook.csv \n 'variables': [ #enter variable names of the column you selected above.\n \"p_nonhisp_white_persons\",\n \"p_nonhisp_black_persons\",\n \"p_hispanic_persons\",\n \"p_asian_persons\", \n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'chart': \"Correlogram\", \n }\n \n input_attributes = pd.read_csv(\"attributes/Copy of San_Diego_ACS_2010.csv\", dtype={'geoid':str})\n shapefile = gpd.read_file(\"shp/San_Diego2010.shp\")\n \n param2 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"SD_correlogram\",\n 'state_fips': None,\n 'msa_fips': \"41740\", #For more options: http://osnap.cloud/~suhan/LNE/pick_POI.html\n 'county_fips': None,\n 'year': 2000,\n 'NumOfMaps': 6,\n 'variables': [\n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'chart': \"Correlogram\",\n 'label': \"short_name\", # variable, short_name or full_name\n }\n input_attributes = pd.read_csv(\"attributes/Copy of San_Diego_ACS_2010.csv\", dtype={'geoid':str})\n shapefile = gpd.read_file(\"shp/San_Diego2010.shp\")\n\n param3 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"SD_correlogram_from_csv\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile,\n 'year': 2000,\n 'NumOfMaps': 6,\n 'variables': [\n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'label': \"short_name\", # variable, short_name or full_name\n #'chart': \"Stacked Chart\", \n #'chart': \"Correlogram\",\n #'chart': \"Scatter Plot\", \n #'chart': \"Parallel Coordinates Plot\", \n }\n \n Adaptive_Choropleth_Mapper_viz(param)\n \n ended_datetime = datetime.now()\n elapsed = ended_datetime - started_datetime\n total_seconds = int(elapsed.total_seconds())\n hours, remainder = divmod(total_seconds,60*60)\n minutes, seconds = divmod(remainder,60) \n print('This program ended at %s %s Elapsed %02d:%02d:%02d' % (ended_datetime.strftime('%Y-%m-%d'), ended_datetime.strftime('%H:%M:%S'), hours, minutes, seconds))\n " ]
[ [ "pandas.notnull", "pandas.read_csv" ] ]
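Editor's note: the Adaptive Choropleth Mapper helper above drives everything from a plain param dictionary passed to Adaptive_Choropleth_Mapper_viz. The sketch below is condensed from the __main__ block of that same file; the CSV path, shapefile path, and variable names are the sample values used there (assumptions about a local data layout), and the functions defined above are assumed to be in scope.

# Condensed usage sketch; paths and variable names are the sample values from
# the __main__ block above, not files guaranteed to exist on your machine.
import pandas as pd
import geopandas as gpd

input_attributes = pd.read_csv("attributes/Chicago_1980_1990_2000_2010.csv", dtype={'geoid': str})
input_attributes = input_attributes.rename(columns={'geoid': 'tractID'})
shapefile = gpd.read_file("shp/Cook_County_Tract.shp").rename(columns={'GEOID10': 'tractID'})

param = {
    'title': "Adaptive Choropleth Mapper with Scatter Plot",
    'filename_suffix': "Chicago_ACM_Scatter",   # output is written to ACM_<suffix>/
    'inputCSV': input_attributes,               # attributes; first column is the tract id
    'shapefile': shapefile,                     # geometry; first column must match that id
    'year': 2000,
    'label': "short_name",                      # looked up in template/conversion_table_codebook.csv
    'variables': ["p_nonhisp_white_persons", "p_hispanic_persons"],
    'chart': "Scatter Plot",                    # forces NumOfMaps = 2 in write_CONFIG_js
}

Adaptive_Choropleth_Mapper_viz(param)  # writes index.html plus CONFIG/GEO_JSON/VARIABLES .js files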
kentaro/CodeXGLUE
[ "d956beaca2902f305ce229fb97737146f9bc1df5" ]
[ "Code-Code/Defect-detection/code/model.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nimport torch\nimport torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\nimport copy\nfrom transformers.modeling_bert import BertLayerNorm\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\n \n \nclass Model(nn.Module): \n def __init__(self, encoder,config,tokenizer,args):\n super(Model, self).__init__()\n self.encoder = encoder\n self.config=config\n self.tokenizer=tokenizer\n self.args=args\n \n \n def forward(self, input_ids=None,labels=None): \n outputs=self.encoder(input_ids,attention_mask=input_ids.ne(1))[0]\n logits=outputs\n prob=F.sigmoid(logits)\n if labels is not None:\n labels=labels.float()\n loss=torch.log(prob[:,0]+1e-10)*labels+torch.log((1-prob)[:,0]+1e-10)*(1-labels)\n loss=-loss.mean()\n return loss,prob\n else:\n return prob\n \n \n \n" ]
[ [ "torch.nn.functional.sigmoid", "torch.log" ] ]
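Editor's note on the loss in Model.forward above: F.sigmoid is the deprecated spelling of torch.sigmoid, and the hand-written expression is ordinary binary cross-entropy on prob[:, 0], up to the 1e-10 stabilizer. A small self-contained check (not part of the repository) that the two agree:

# Sketch: the manual loss used above equals F.binary_cross_entropy on prob[:, 0].
import torch
import torch.nn.functional as F

logits = torch.randn(4, 1)                 # stand-in for the encoder output
labels = torch.tensor([0., 1., 1., 0.])

prob = torch.sigmoid(logits)               # non-deprecated replacement for F.sigmoid
manual = -(torch.log(prob[:, 0] + 1e-10) * labels
           + torch.log((1 - prob)[:, 0] + 1e-10) * (1 - labels)).mean()
reference = F.binary_cross_entropy(prob[:, 0], labels)
print(torch.allclose(manual, reference, atol=1e-5))  # True, modulo the 1e-10 epsilon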
josephmaa/pyuoi
[ "f546900791c3c2176e5ac2ac6c553bae065a3127", "f546900791c3c2176e5ac2ac6c553bae065a3127" ]
[ "pyuoi/linear_model/base.py", "classifier/rat7m_classifier.py" ]
[ "import abc as _abc\nimport numpy as np\nimport logging\nfrom sklearn.linear_model._base import SparseCoefMixin\nfrom sklearn.metrics import r2_score, accuracy_score, log_loss\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import check_X_y\nfrom sklearn.preprocessing import StandardScaler\n\nfrom scipy.sparse import issparse, csr_matrix\n\nfrom pyuoi import utils\nfrom pyuoi.mpi_utils import (Gatherv_rows, Bcast_from_root)\nfrom pyuoi.utils import write_timestamped_numpy_binary\n\nfrom .utils import stability_selection_to_threshold, intersection\nfrom ..utils import check_logger\n\n\nclass AbstractUoILinearModel(SparseCoefMixin, metaclass=_abc.ABCMeta):\n r\"\"\"An abstract base class for UoI ``linear_model`` classes.\n\n Parameters\n ----------\n n_boots_sel : int\n The number of data bootstraps to use in the selection module.\n Increasing this number will make selection more strict.\n n_boots_est : int\n The number of data bootstraps to use in the estimation module.\n Increasing this number will relax selection and decrease variance.\n selection_frac : float\n The fraction of the dataset to use for training in each resampled\n bootstrap, during the selection module. Small values of this parameter\n imply larger \"perturbations\" to the dataset.\n estimation_frac : float\n The fraction of the dataset to use for training in each resampled\n bootstrap, during the estimation module. The remaining data is used\n to obtain validation scores. Small values of this parameters imply\n larger \"perturbations\" to the dataset.\n stability_selection : int, float, or array-like\n If int, treated as the number of bootstraps that a feature must appear\n in to guarantee placement in selection profile. If float, must be\n between 0 and 1, and is instead the proportion of bootstraps. If\n array-like, must consist of either ints or floats between 0 and 1.\n In this case, each entry in the array-like object will act as a\n separate threshold for placement in the selection profile.\n fit_intercept : bool\n Whether to calculate the intercept for this model. If set to False,\n no intercept will be used in calculations (e.g. data is expected to be\n already centered).\n standardize : bool\n If True, the regressors X will be standardized before regression by\n subtracting the mean and dividing by their standard deviations.\n shared_support : bool\n For models with more than one output (multinomial logistic regression)\n this determines whether all outputs share the same support or can\n have independent supports.\n max_iter : int\n Maximum number of iterations for iterative fitting methods.\n random_state : int, RandomState instance, or None\n The seed of the pseudo random number generator that selects a random\n feature to update. 
If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by ``np.random``.\n comm : MPI communicator\n If passed, the selection and estimation steps are parallelized.\n logger : Logger\n The logger to use for messages when ``verbose=True`` in ``fit``.\n If *None* is passed, a logger that writes to ``sys.stdout`` will be\n used.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Estimated coefficients for the linear regression problem.\n intercept_ : float\n Independent term in the linear model.\n supports_ : array, shape\n Boolean array indicating whether a given regressor (column) is selected\n for estimation for a given regularization parameter value (row).\n \"\"\"\n\n def __init__(self, n_boots_sel=24, n_boots_est=24, selection_frac=0.9,\n estimation_frac=0.9, stability_selection=1.,\n fit_intercept=True, standardize=True,\n shared_support=True, max_iter=None, random_state=None,\n comm=None, logger=None):\n # data split fractions\n self.selection_frac = selection_frac\n self.estimation_frac = estimation_frac\n # number of bootstraps\n self.n_boots_sel = n_boots_sel\n self.n_boots_est = n_boots_est\n # other hyperparameters\n self.stability_selection = stability_selection\n self.fit_intercept = fit_intercept\n self.standardize = standardize\n self.shared_support = shared_support\n self.max_iter = max_iter\n self.comm = comm\n # preprocessing\n if isinstance(random_state, int):\n # make sure ranks use different seed\n if self.comm is not None:\n random_state += self.comm.rank\n self.random_state = np.random.RandomState(random_state)\n else:\n if random_state is None:\n self.random_state = np.random\n else:\n self.random_state = random_state\n\n # extract selection thresholds from user provided stability selection\n self.selection_thresholds_ = stability_selection_to_threshold(\n self.stability_selection, self.n_boots_sel)\n\n self.n_supports_ = None\n\n self._logger = check_logger(logger, 'uoi_linear_model', self.comm)\n\n @_abc.abstractproperty\n def estimation_score(self):\n pass\n\n @_abc.abstractmethod\n def get_reg_params(self):\n pass\n\n @_abc.abstractstaticmethod\n def _score_predictions(self, metric, fitter, X, y, supports, boot_idxs):\n pass\n\n @_abc.abstractmethod\n def intersect(self, coef, thresholds):\n \"\"\"Intersect coefficients across all thresholds.\"\"\"\n pass\n\n def _pre_fit(self, X, y):\n \"\"\"Perform class-specific setup for fit().\"\"\"\n if self.standardize:\n if self.fit_intercept and issparse(X):\n msg = (\"Cannot center sparse matrices: \"\n \"pass `fit_intercept=False`\")\n raise ValueError(msg)\n self._X_scaler = StandardScaler(with_mean=self.fit_intercept)\n X = self._X_scaler.fit_transform(X)\n if y.ndim == 2:\n self.output_dim = y.shape[1]\n else:\n self.output_dim = 1\n return X, y\n\n def _post_fit(self, X, y):\n \"\"\"Perform class-specific cleanup for fit().\"\"\"\n if self.standardize:\n sX = self._X_scaler\n self.coef_ /= sX.scale_[np.newaxis]\n\n @_abc.abstractmethod\n def _fit_intercept(self, X, y):\n \"\"\"Fit a model with an intercept and fixed coefficients.\n\n This is used to re-fit the intercept after the coefficients are\n estimated.\n \"\"\"\n pass\n\n @_abc.abstractmethod\n def _fit_intercept_no_features(self, y):\n \"\"\"Fit a model with only an intercept.\n\n This is used in cases where the model has no support selected.\n \"\"\"\n 
pass\n\n def fit(self, X, y, stratify=None, verbose=False):\n \"\"\"Fit data according to the UoI algorithm.\n\n Parameters\n ----------\n X : ndarray or scipy.sparse matrix, (n_samples, n_features)\n The design matrix.\n y : ndarray, shape (n_samples,)\n Response vector. Will be cast to X's dtype if necessary.\n Currently, this implementation does not handle multiple response\n variables.\n stratify : array-like or None\n Ensures groups of samples are alloted to training/test sets\n proportionally. Labels for each group must be an int greater\n than zero. Must be of size equal to the number of samples, with\n further restrictions on the number of groups.\n verbose : bool\n A switch indicating whether the fitting should print out messages\n displaying progress.\n \"\"\"\n if verbose:\n self._logger.setLevel(logging.DEBUG)\n else:\n self._logger.setLevel(logging.WARNING)\n\n X, y = self._pre_fit(X, y)\n\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n y_numeric=True, multi_output=True)\n\n # extract model dimensions\n n_features = X.shape[1]\n n_coef = self.get_n_coef(X, y)\n\n # check if the response variable is constant\n if np.unique(y).size == 1:\n self.coef_ = np.zeros((self.output_dim, n_features))\n self._fit_intercept(X, y)\n self._post_fit(X, y)\n return self\n\n ####################\n # Selection Module #\n ####################\n # choose the regularization parameters for selection sweep\n self.reg_params_ = self.get_reg_params(X, y)\n self.n_reg_params_ = len(self.reg_params_)\n\n rank = 0\n size = 1\n if self.comm is not None:\n rank = self.comm.rank\n size = self.comm.size\n\n # initialize selection\n if size > self.n_boots_sel:\n tasks = np.array_split(np.arange(self.n_boots_sel *\n self.n_reg_params_), size)[rank]\n selection_coefs = np.empty((tasks.size, n_coef))\n my_boots = dict((task_idx // self.n_reg_params_, None)\n for task_idx in tasks)\n else:\n # split up bootstraps into processes\n tasks = np.array_split(np.arange(self.n_boots_sel),\n size)[rank]\n selection_coefs = np.empty((tasks.size, self.n_reg_params_,\n n_coef))\n my_boots = dict((task_idx, None) for task_idx in tasks)\n\n for boot in range(self.n_boots_sel):\n if size > 1:\n if rank == 0:\n rvals = train_test_split(np.arange(X.shape[0]),\n test_size=1 - self.selection_frac,\n stratify=stratify,\n random_state=self.random_state)\n else:\n rvals = [None] * 2\n rvals = [Bcast_from_root(rval, self.comm, root=0)\n for rval in rvals]\n if boot in my_boots.keys():\n my_boots[boot] = rvals\n else:\n my_boots[boot] = train_test_split(\n np.arange(X.shape[0]),\n test_size=1 - self.selection_frac,\n stratify=stratify,\n random_state=self.random_state)\n # (Joseph) Write out linear coefficients later to binary .npy file.\n x_linear_coefficients = []\n y_linear_coefficients = []\n\n # iterate over bootstraps\n curr_boot_idx = None\n for ii, task_idx in enumerate(tasks):\n if size > self.n_boots_sel:\n boot_idx = task_idx // self.n_reg_params_\n reg_idx = task_idx % self.n_reg_params_\n my_reg_params = [self.reg_params_[reg_idx]]\n else:\n boot_idx = task_idx\n my_reg_params = self.reg_params_\n # Never warm start across bootstraps\n if (curr_boot_idx != boot_idx):\n if hasattr(self._selection_lm, 'coef_'):\n self._selection_lm.coef_ *= 0.\n if hasattr(self._selection_lm, 'intercept_'):\n self._selection_lm.intercept_ *= 0.\n curr_boot_idx = boot_idx\n\n # draw a resampled bootstrap\n idxs_train, idxs_test = my_boots[boot_idx]\n X_rep = X[idxs_train]\n y_rep = y[idxs_train]\n\n # (Joseph) Write out the 
coefficients for regularization.\n x_linear_coefficients.append(X_rep)\n y_linear_coefficients.append(y_rep)\n\n # fit the coefficients\n if size > self.n_boots_sel:\n msg = (\"selection bootstrap %d, \"\n \"regularization parameter set %d\"\n % (boot_idx, reg_idx))\n self._logger.info(msg)\n\n else:\n self._logger.info(\"selection bootstrap %d\" % (boot_idx))\n selection_coefs[ii] = np.squeeze(\n self.uoi_selection_sweep(X_rep, y_rep, my_reg_params))\n\n write_timestamped_numpy_binary(filename=\"/Users/josephgmaa/pyuoi/pyuoi/data/features/coefficients/coefficients\",\n x_coefficients=np.array(\n x_linear_coefficients),\n y_coefficients=np.array(y_linear_coefficients))\n\n # if distributed, gather selection coefficients to 0,\n # perform intersection, and broadcast results\n if size > 1:\n selection_coefs = Gatherv_rows(selection_coefs, self.comm, root=0)\n if rank == 0:\n if size > self.n_boots_sel:\n selection_coefs = selection_coefs.reshape(\n self.n_boots_sel,\n self.n_reg_params_,\n n_coef)\n supports = self.intersect(\n selection_coefs,\n self.selection_thresholds_).astype(int)\n else:\n supports = None\n supports = Bcast_from_root(supports, self.comm, root=0)\n self.supports_ = supports.astype(bool)\n else:\n self.supports_ = self.intersect(selection_coefs,\n self.selection_thresholds_)\n\n self.n_supports_ = self.supports_.shape[0]\n\n if rank == 0:\n self._logger.info(\"Found %d supports\" % self.n_supports_)\n\n #####################\n # Estimation Module #\n #####################\n # set up data arrays\n tasks = np.array_split(np.arange(self.n_boots_est *\n self.n_supports_), size)[rank]\n my_boots = dict((task_idx // self.n_supports_, None)\n for task_idx in tasks)\n estimates = np.zeros((tasks.size, n_coef))\n\n for boot in range(self.n_boots_est):\n if size > 1:\n if rank == 0:\n rvals = train_test_split(np.arange(X.shape[0]),\n test_size=1 - self.estimation_frac,\n stratify=stratify,\n random_state=self.random_state)\n else:\n rvals = [None] * 2\n rvals = [Bcast_from_root(rval, self.comm, root=0)\n for rval in rvals]\n if boot in my_boots.keys():\n my_boots[boot] = rvals\n else:\n my_boots[boot] = train_test_split(\n np.arange(X.shape[0]),\n test_size=1 - self.estimation_frac,\n stratify=stratify,\n random_state=self.random_state)\n\n # score (r2/AIC/AICc/BIC) for each bootstrap for each support\n scores = np.zeros(tasks.size)\n\n # iterate over bootstrap samples and supports\n for ii, task_idx in enumerate(tasks):\n boot_idx = task_idx // self.n_supports_\n support_idx = task_idx % self.n_supports_\n support = self.supports_[support_idx]\n # draw a resampled bootstrap\n idxs_train, idxs_test = my_boots[boot_idx]\n X_rep = X[idxs_train]\n y_rep = y[idxs_train]\n self._logger.info(\"estimation bootstrap %d, support %d\"\n % (boot_idx, support_idx))\n if np.any(support):\n\n # compute the estimate and store the fitted coefficients\n if self.shared_support:\n self._estimation_lm.fit(X_rep[:, support], y_rep)\n estimates[ii, np.tile(support, self.output_dim)] = \\\n self._estimation_lm.coef_.ravel()\n else:\n self._estimation_lm.fit(X_rep, y_rep, coef_mask=support)\n estimates[ii] = self._estimation_lm.coef_.ravel()\n\n scores[ii] = self._score_predictions(\n metric=self.estimation_score,\n fitter=self._estimation_lm,\n X=X, y=y,\n support=support,\n boot_idxs=my_boots[boot_idx])\n else:\n fitter = self._fit_intercept_no_features(y_rep)\n if issparse(X):\n X_ = csr_matrix(X.shape, dtype=X.dtype)\n else:\n X_ = np.zeros_like(X)\n scores[ii] = self._score_predictions(\n 
metric=self.estimation_score,\n fitter=fitter,\n X=X_, y=y,\n support=np.zeros(X.shape[1], dtype=bool),\n boot_idxs=my_boots[boot_idx])\n\n if size > 1:\n estimates = Gatherv_rows(send=estimates, comm=self.comm,\n root=0)\n scores = Gatherv_rows(send=scores, comm=self.comm,\n root=0)\n self.rp_max_idx_ = None\n best_estimates = None\n coef = None\n self.intercept_ = None\n if rank == 0:\n estimates = estimates.reshape(self.n_boots_est,\n self.n_supports_, n_coef)\n scores = scores.reshape(self.n_boots_est, self.n_supports_)\n self.rp_max_idx_ = np.argmax(scores, axis=1)\n best_estimates = estimates[np.arange(self.n_boots_est),\n self.rp_max_idx_]\n # take the median across estimates for the final estimate\n coef = np.median(best_estimates,\n axis=0).reshape(self.output_dim, n_features)\n self.coef_ = coef\n self._fit_intercept(X, y)\n self.estimates_ = Bcast_from_root(estimates, self.comm, root=0)\n self.scores_ = Bcast_from_root(scores, self.comm, root=0)\n self.coef_ = Bcast_from_root(coef, self.comm, root=0)\n self.intercept_ = Bcast_from_root(self.intercept_,\n self.comm, root=0)\n self.rp_max_idx_ = self.comm.bcast(self.rp_max_idx_, root=0)\n else:\n self.estimates_ = estimates.reshape(self.n_boots_est,\n self.n_supports_, n_coef)\n self.scores_ = scores.reshape(self.n_boots_est, self.n_supports_)\n self.rp_max_idx_ = np.argmax(self.scores_, axis=1)\n # extract the estimates over bootstraps from model with best\n # regularization parameter value\n best_estimates = self.estimates_[np.arange(self.n_boots_est),\n self.rp_max_idx_, :]\n # take the median across estimates for the final, bagged estimate\n self.coef_ = np.median(best_estimates,\n axis=0).reshape(self.output_dim, n_features)\n self._fit_intercept(X, y)\n self._post_fit(X, y)\n\n return self\n\n def uoi_selection_sweep(self, X, y, reg_param_values):\n \"\"\"Perform selection regression on a dataset over a sweep of\n regularization parameter values.\n\n Parameters\n ----------\n X : ndarray or scipy.sparse matrix, shape (n_samples, n_features)\n The design matrix.\n y : ndarray, shape (n_samples,)\n Response vector.\n reg_param_values: list of dicts\n A list of dictionaries containing the regularization parameter\n values to iterate over.\n\n Returns\n -------\n coefs : ndarray, shape (n_param_values, n_features)\n Predicted parameter values for each regularization strength.\n \"\"\"\n\n n_param_values = len(reg_param_values)\n n_coef = self.get_n_coef(X, y)\n\n coefs = np.zeros((n_param_values, n_coef))\n\n # apply the selection regression to bootstrapped datasets\n for reg_param_idx, reg_params in enumerate(reg_param_values):\n # reset the regularization parameter\n self._selection_lm.set_params(**reg_params)\n # rerun fit\n self._selection_lm.fit(X, y)\n # store coefficients\n coefs[reg_param_idx] = self._selection_lm.coef_.ravel()\n\n return coefs\n\n def get_n_coef(self, X, y):\n \"\"\"Return the number of coefficients that will be estimated\n\n This should return the shape of X.\n \"\"\"\n return X.shape[1] * self.output_dim\n\n\nclass AbstractUoILinearRegressor(AbstractUoILinearModel,\n metaclass=_abc.ABCMeta):\n \"\"\"An abstract base class for UoI linear regression classes.\"\"\"\n\n _valid_estimation_metrics = ('r2', 'AIC', 'AICc', 'BIC')\n\n _train_test_map = {'train': 0, 'test': 1}\n\n _default_est_targets = {'r2': 1, 'AIC': 0, 'AICc': 0, 'BIC': 0}\n\n def __init__(self, n_boots_sel=24, n_boots_est=24, selection_frac=0.9,\n estimation_frac=0.9, stability_selection=1.,\n estimation_score='r2', 
estimation_target=None,\n copy_X=True, fit_intercept=True,\n standardize=True, random_state=None, max_iter=None,\n comm=None, logger=None):\n super(AbstractUoILinearRegressor, self).__init__(\n n_boots_sel=n_boots_sel,\n n_boots_est=n_boots_est,\n selection_frac=selection_frac,\n estimation_frac=estimation_frac,\n stability_selection=stability_selection,\n fit_intercept=fit_intercept,\n standardize=standardize,\n max_iter=max_iter,\n random_state=random_state,\n comm=comm,\n logger=logger)\n\n if estimation_score not in self._valid_estimation_metrics:\n raise ValueError(\n \"invalid estimation metric: '%s'\" % estimation_score)\n\n self.__estimation_score = estimation_score\n\n if estimation_target is not None:\n if estimation_target not in ['train', 'test']:\n raise ValueError(\n \"invalid estimation target: %s\" % estimation_target)\n else:\n estimation_target = self._train_test_map[estimation_target]\n else:\n estimation_target = self._default_est_targets[estimation_score]\n self._estimation_target = estimation_target\n\n def _pre_fit(self, X, y):\n X, y = super()._pre_fit(X, y)\n if y.ndim == 1:\n y = y[:, np.newaxis]\n elif y.ndim == 2:\n if y.shape[1] > 1:\n raise ValueError('y should either have shape ' +\n '(n_samples, ) or (n_samples, 1).')\n else:\n raise ValueError('y should either have shape ' +\n '(n_samples, ) or (n_samples, 1).')\n if self.standardize:\n self._y_scaler = StandardScaler(with_mean=self.fit_intercept)\n y = self._y_scaler.fit_transform(y)\n y = np.squeeze(y)\n self.output_dim = 1\n return X, y\n\n def _post_fit(self, X, y):\n super()._post_fit(X, y)\n if self.standardize:\n sX = self._X_scaler\n sy = self._y_scaler\n self.coef_ *= sy.scale_[:, np.newaxis]\n if self.fit_intercept:\n self.intercept_ *= sy.scale_\n self.intercept_ += sy.mean_ - np.dot(sX.mean_,\n self.coef_.T)\n self.coef_ = np.squeeze(self.coef_)\n\n def intersect(self, coef, thresholds):\n \"\"\"Intersect coefficients accross all thresholds.\"\"\"\n return intersection(coef, thresholds)\n\n @property\n def estimation_score(self):\n return self.__estimation_score\n\n def _score_predictions(self, metric, fitter, X, y, support, boot_idxs):\n \"\"\"Score, according to some metric, predictions provided by a model.\n\n The resulting score will be negated if an information criterion is\n specified.\n\n Parameters\n ----------\n metric : string\n The type of score to run on the prediction. Valid options include\n 'r2' (explained variance), 'BIC' (Bayesian information criterion),\n 'AIC' (Akaike information criterion), and 'AICc' (corrected AIC).\n fitter : object\n Must contain .predict and .predict_proba methods.\n X : array-like\n The design matrix.\n y : array-like\n Response vector.\n supports : array-like\n The value of the supports for the model\n boot_idxs : 2-tuple of array-like objects\n Tuple of (train_idxs, test_idxs) generated from a bootstrap\n sample. 
If this is specified, then the appropriate set of\n data will be used for evaluating scores: test data for r^2,\n and training data for information criteria\n\n Returns\n -------\n score : float\n The score.\n \"\"\"\n\n # Select the data relevant for the estimation_score\n X = X[boot_idxs[self._estimation_target]]\n y = y[boot_idxs[self._estimation_target]]\n\n if y.ndim == 2:\n if y.shape[1] > 1:\n raise ValueError('y should either have shape ' +\n '(n_samples, ) or (n_samples, 1).')\n y = np.squeeze(y)\n elif y.ndim > 2:\n raise ValueError('y should either have shape ' +\n '(n_samples, ) or (n_samples, 1).')\n\n y_pred = fitter.predict(X[:, support])\n if y.shape != y_pred.shape:\n raise ValueError('Targets and predictions are not the same shape.')\n\n if metric == 'r2':\n score = r2_score(y, y_pred)\n else:\n ll = utils.log_likelihood_glm(model='normal',\n y_true=y,\n y_pred=y_pred)\n n_features = np.count_nonzero(support)\n n_samples = X.shape[0]\n if metric == 'BIC':\n score = utils.BIC(ll, n_features, n_samples)\n elif metric == 'AIC':\n score = utils.AIC(ll, n_features)\n elif metric == 'AICc':\n score = utils.AICc(ll, n_features, n_samples)\n else:\n raise ValueError(metric + ' is not a valid option.')\n # negate the score since lower information criterion is preferable\n score = -score\n return score\n\n def _fit_intercept_no_features(self, y):\n \"\"\"Fit a model with only an intercept.\n\n This is used in cases where the model has no support selected.\n \"\"\"\n return LinearInterceptFitterNoFeatures(y)\n\n def _fit_intercept(self, X, y):\n \"\"\"Fit the intercept.\"\"\"\n if self.fit_intercept:\n self.intercept_ = (y.mean(axis=0) -\n np.dot(X.mean(axis=0), self.coef_.T))\n else:\n self.intercept_ = np.zeros(1)\n\n\nclass LinearInterceptFitterNoFeatures(object):\n def __init__(self, y):\n self.intercept_ = y.mean()\n\n def predict(self, X):\n n_samples = X.shape[0]\n return np.tile(self.intercept_, n_samples)\n\n\nclass AbstractUoIGeneralizedLinearRegressor(AbstractUoILinearModel,\n metaclass=_abc.ABCMeta):\n \"\"\"An abstract base class for UoI linear classifier classes.\"\"\"\n\n _valid_estimation_metrics = ('log', 'BIC', 'AIC', 'AICc', 'acc')\n\n _train_test_map = {'train': 0, 'test': 1}\n\n _default_est_targets = {'log': 1, 'AIC': 0, 'AICc': 0,\n 'BIC': 0, 'acc': 1}\n\n def __init__(self, n_boots_sel=24, n_boots_est=24, selection_frac=0.9,\n estimation_frac=0.9, stability_selection=1.,\n estimation_score='acc', estimation_target=None,\n copy_X=True, fit_intercept=True, standardize=True,\n random_state=None, max_iter=None, shared_support=True,\n comm=None, logger=None):\n super(AbstractUoIGeneralizedLinearRegressor, self).__init__(\n n_boots_sel=n_boots_sel,\n n_boots_est=n_boots_est,\n selection_frac=selection_frac,\n estimation_frac=estimation_frac,\n stability_selection=stability_selection,\n random_state=random_state,\n fit_intercept=fit_intercept,\n standardize=standardize,\n shared_support=shared_support,\n max_iter=max_iter,\n comm=comm,\n logger=logger)\n\n if estimation_score not in self._valid_estimation_metrics:\n raise ValueError(\n \"invalid estimation metric: '%s'\" % estimation_score)\n self.__estimation_score = estimation_score\n\n if estimation_target is not None:\n if estimation_target not in ['train', 'test']:\n raise ValueError(\n \"invalid estimation target: %s\" % estimation_target)\n else:\n estimation_target = self._train_test_map[estimation_target]\n else:\n estimation_target = self._default_est_targets[estimation_score]\n\n self._estimation_target = 
estimation_target\n\n def _post_fit(self, X, y):\n super()._post_fit(X, y)\n if self.standardize and self.fit_intercept:\n sX = self._X_scaler\n self.intercept_ += np.dot(sX.mean_ * sX.scale_,\n self.coef_.T)\n\n def intersect(self, coef, thresholds):\n \"\"\"Intersect coefficients accross all thresholds.\n\n This implementation will account for multi-class classification.\n \"\"\"\n supports = intersection(coef, thresholds)\n if self.output_dim > 1 and self.shared_support:\n n_features = supports.shape[-1] // self.output_dim\n supports = supports.reshape((-1, self.output_dim, n_features))\n supports = np.sum(supports, axis=-2).astype(bool)\n supports = np.unique(supports, axis=0)\n return supports\n\n @property\n def estimation_score(self):\n return self.__estimation_score\n\n def _score_predictions(self, metric, fitter, X, y, support, boot_idxs):\n \"\"\"Score, according to some metric, predictions provided by a model.\n\n The resulting score will be negated if an information criterion is\n specified.\n\n Parameters\n ----------\n metric : string\n The type of score to run on the prediction. Valid options include\n 'r2' (explained variance), 'BIC' (Bayesian information criterion),\n 'AIC' (Akaike information criterion), and 'AICc' (corrected AIC).\n fitter : object\n Must contain .predict and .predict_proba methods.\n X : array-like\n The design matrix.\n y : array-like\n Response vector.\n supports : array-like\n The value of the supports for the model\n boot_idxs : 2-tuple of array-like objects\n Tuple of (train_idxs, test_idxs) generated from a bootstrap\n sample. If this is specified, then the appropriate set of\n data will be used for evaluating scores: test data for r^2,\n and training data for information criteria\n\n Returns\n -------\n score : float\n The score.\n \"\"\"\n\n # Select the data relevant for the estimation_score\n X = X[boot_idxs[self._estimation_target]]\n y = y[boot_idxs[self._estimation_target]]\n\n if metric == 'acc':\n if self.shared_support:\n y_pred = fitter.predict(X[:, support])\n else:\n y_pred = fitter.predict(X)\n score = accuracy_score(y, y_pred)\n else:\n\n if self.shared_support:\n y_pred = fitter.predict_proba(X[:, support])\n else:\n y_pred = fitter.predict_proba(X)\n ll = -log_loss(y, y_pred, labels=self.classes_)\n if metric == 'log':\n score = ll\n else:\n n_features = np.count_nonzero(support)\n n_samples = X.shape[0]\n if metric == 'BIC':\n score = utils.BIC(n_samples * ll, n_features, n_samples)\n elif metric == 'AIC':\n score = utils.AIC(n_samples * ll, n_features)\n elif metric == 'AICc':\n score = utils.AICc(n_samples * ll, n_features, n_samples)\n else:\n raise ValueError(metric + ' is not a valid metric.')\n # negate the score since lower information criterion is\n # preferable\n score = -score\n\n return score\n", "from pyuoi import UoI_L1Logistic\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom pyuoi.utils import write_timestamped_numpy_binary, dump_json\nfrom scipy.special import rel_entr\nfrom scipy.spatial.distance import jensenshannon\nfrom pyuoi.datasets import make_classification\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport sys\n\n\ndef initialize_arg_parser():\n parser = argparse.ArgumentParser(\n description='Read in numpy files and visualize frequency differences')\n parser.add_argument('--input_files',\n help='Path to the input files.',\n default=[\n 
\"/Users/josephgmaa/pyuoi/pyuoi/data/features/nolj_Recording_day7_overnight_636674151185633714_5_nolj.c3d.1851.features.netcdf\"],\n nargs='+')\n parser.add_argument('--column_names',\n help='The column names to be used for parsing',\n default=\"PCA_mavg_velocity_relative_0\",\n nargs='+')\n parser.add_argument(\"--dump\",\n help=\"A boolean flag whether to dump the results to a JSON file.\",\n default=True)\n parser.add_argument(\"--training_seed\",\n help=\"Seed for setting train_test_split.\",\n default=10, type=int)\n parser.add_argument(\"--model_seed\",\n help=\"Seed for setting internal model random state.\",\n default=10, type=int)\n parser.add_argument(\"--use_small_dataset\",\n help=\"Whether to use a small dataset for debugging\",\n default=False, type=bool)\n return parser\n\n\ndef main(parsed_args: argparse.Namespace):\n \"\"\"\n Run argument parser with commands:\n\n >>> python classifier/rat7m_classifier.py --input_files /Users/josephgmaa/pyuoi/pyuoi/data/features/PCs/PCs-mavg-velocity_relative.netcdf PCA_mavg_velocity_relative_0\n\n >>> python classifier/rat7m_classifier.py --input_files /Users/josephgmaa/pyuoi/pyuoi/data/features/PCs/PCs-mavg-velocity_relative.netcdf --column_names PCA_mavg_velocity_relative_0 PCA_mavg_velocity_relative_1 PCA_mavg_velocity_relative_2 PCA_mavg_velocity_relative_3 PCA_mavg_velocity_relative_4\n\n To run with a small dataset for debugging, pass in the flag --use_small_dataset:\n\n >>> python classifier/rat7m_classifier.py --use_small_dataset True\n \"\"\"\n if parsed_args.use_small_dataset:\n n_features = 20\n n_inf = 10\n x, y, _, _ = make_classification(n_samples=200,\n random_state=10,\n n_classes=5,\n n_informative=n_inf,\n n_features=n_features,\n shared_support=True,\n w_scale=4.)\n\n x_train, x_test, y_train, y_test = train_test_split(\n x, y, random_state=parsed_args.training_seed)\n\n else:\n df = pd.DataFrame()\n for filename in parsed_args.input_files:\n df = pd.concat([df, xr.load_dataset(\n filename, engine='h5netcdf').to_dataframe()])\n\n row_indices = np.arange(start=0, stop=df.shape[0]).tolist()\n df = df.iloc[row_indices, :]\n column_indices = [\n i for i in df.columns if any(i in word for word in parsed_args.column_names)]\n y = df['behavior_name'].to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(\n df.loc[:, column_indices].to_numpy(), y, random_state=parsed_args.training_seed)\n\n assert x_train.shape[\n 1] > 0, f\"X train dataset should have at least 1 input feature. 
Try checking that the column names match the input dataset: {list(df.columns)}\"\n\n l1log = UoI_L1Logistic(random_state=parsed_args.model_seed, multi_class='multinomial').fit(\n x_train, y_train, verbose=True)\n\n y_hat = l1log.predict(x_test)\n\n accuracy = accuracy_score(y_test, y_hat)\n print('y_test: ', y_test)\n print('y_hat: ', y_hat)\n y_test_freq, y_hat_freq = np.bincount(y_test), np.bincount(y_hat)\n print('y_test_freq: ', y_test_freq)\n print('y_hat_freq: ', y_hat_freq)\n\n kl_divergence_y_hat_given_y = rel_entr(y_hat_freq, y_test_freq)\n kl_divergence_y_given_y_hat = rel_entr(y_test_freq, y_hat_freq)\n jensenshannon_predictions = jensenshannon(y_test_freq, y_hat_freq)\n\n print(f\"Accuracy: {accuracy}\")\n print(f\"Resulting values: {y_hat}\")\n\n filename = \"/Users/josephgmaa/pyuoi/pyuoi/data/features/run_parameters/run_parameters\"\n\n if parsed_args.dump:\n dump_json(model=l1log, filename=\"/Users/josephgmaa/pyuoi/pyuoi/data/features/run_parameters/run_parameters\",\n results={\"accuracy\": accuracy,\n \"predicted_output\": y_hat,\n \"kl_divergence_y_hat_given_y\": kl_divergence_y_hat_given_y,\n \"kl_divergence_y_given_y_hat\": kl_divergence_y_given_y_hat,\n \"jensenshannon_predictions\":\n jensenshannon_predictions,\n \"input_files\": parsed_args.input_files,\n \"column_names\": parsed_args.column_names, \"train_test_split_seed\": parsed_args.training_seed, \"l1log_seed\": parsed_args.model_seed})\n\n write_timestamped_numpy_binary(filename=filename, data=y_hat)\n\n\nif __name__ == \"__main__\":\n arg_parser = initialize_arg_parser()\n parsed_args = arg_parser.parse_args(sys.argv[1:])\n main(parsed_args=parsed_args)\n" ]
[ [ "numpy.dot", "sklearn.metrics.r2_score", "numpy.squeeze", "numpy.zeros_like", "numpy.any", "sklearn.utils.check_X_y", "scipy.sparse.issparse", "numpy.unique", "numpy.arange", "numpy.argmax", "numpy.count_nonzero", "numpy.zeros", "numpy.median", "scipy.sparse.csr_matrix", "sklearn.metrics.log_loss", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.tile", "sklearn.preprocessing.StandardScaler", "numpy.empty", "sklearn.metrics.accuracy_score" ], [ "scipy.special.rel_entr", "numpy.arange", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "scipy.spatial.distance.jensenshannon", "numpy.bincount", "sklearn.metrics.accuracy_score" ] ]
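Editor's note: the second pyuoi file above already contains a synthetic-data path for debugging (--use_small_dataset). Stripped to its essentials, that flow looks like the sketch below; the hyperparameter values are the ones hard-coded in that branch.

# Condensed from the --use_small_dataset branch of rat7m_classifier.py above.
from pyuoi import UoI_L1Logistic
from pyuoi.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X, y, _, _ = make_classification(n_samples=200, random_state=10, n_classes=5,
                                 n_informative=10, n_features=20,
                                 shared_support=True, w_scale=4.)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)

model = UoI_L1Logistic(random_state=10, multi_class='multinomial').fit(X_train, y_train)
print(accuracy_score(y_test, model.predict(X_test)))   # held-out accuracy, as in the script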
ricklentz/nglod
[ "6cb8d10cc4b44f709f855dc98fa8e6e58ba7c1fd" ]
[ "sdf-net/lib/tracer/BaseTracer.py" ]
[ "# The MIT License (MIT)\n#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport numpy as np\n\nfrom lib.utils import setparam\n\nclass BaseTracer(object):\n \"\"\"Virtual base class for tracer\"\"\"\n\n def __init__(self,\n args = None,\n camera_clamp : list = None,\n step_size : float = None,\n grad_method : str = None,\n num_steps : int = None, # samples for raymaching, iterations for sphere trace\n min_dis : float = None): \n\n self.args = args\n self.camera_clamp = setparam(args, camera_clamp, 'camera_clamp')\n self.step_size = setparam(args, step_size, 'step_size')\n self.grad_method = setparam(args, grad_method, 'grad_method')\n self.num_steps = setparam(args, num_steps, 'num_steps')\n self.min_dis = setparam(args, min_dis, 'min_dis')\n\n self.inv_num_steps = 1.0 / self.num_steps\n self.diagonal = np.sqrt(3) * 2.0\n \n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, net, ray_o, ray_d):\n \"\"\"Base implementation for forward\"\"\"\n raise NotImplementedError\n" ]
[ [ "numpy.sqrt" ] ]
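Editor's note: BaseTracer.forward above is deliberately left to subclasses, which receive a network plus per-ray origins and directions. The class below is a hypothetical toy subclass written only to illustrate that interface; it is a naive sphere-tracing loop over the num_steps and min_dis fields, not one of the tracers actually shipped in the repository, and it assumes net maps an (N, 3) tensor of points to an (N, 1) tensor of signed distances.

import torch

class ToySphereTracer(BaseTracer):
    """Illustration only: naive sphere tracing against a signed-distance network."""
    def forward(self, net, ray_o, ray_d):
        x = ray_o.clone()
        hit = torch.zeros(x.shape[0], dtype=torch.bool, device=x.device)
        for _ in range(self.num_steps):
            d = net(x).squeeze(-1)            # signed distance at the current sample points
            hit = hit | (d.abs() < self.min_dis)
            x = x + d.unsqueeze(-1) * ray_d   # advance each ray by its distance estimate
        return x, hit                         # final positions and a per-ray hit mask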
alasdairtran/facenet
[ "ab0b64d1dd5c978cc57ac0d85579a97018de02d1" ]
[ "tmp/align_dlib.py" ]
[ "# Copyright 2015-2016 Carnegie Mellon University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for dlib-based alignment.\"\"\"\n\n# NOTE: This file has been copied from the openface project.\n# https://github.com/cmusatyalab/openface/blob/master/openface/align_dlib.py\n\nimport cv2\nimport numpy as np\n\nimport dlib\n\nTEMPLATE = np.float32([\n (0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),\n (0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),\n (0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),\n (0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),\n (0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),\n (0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),\n (0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),\n (0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),\n (0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),\n (0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),\n (0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),\n (0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),\n (0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),\n (0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),\n (0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),\n (0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),\n (0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),\n (0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),\n (0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),\n (0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),\n (0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),\n (0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),\n (0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),\n (0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),\n (0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),\n (0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),\n (0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),\n (0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),\n (0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),\n (0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),\n (0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),\n (0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),\n (0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),\n (0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])\n\nINV_TEMPLATE = np.float32([\n (-0.04099179660567834, -0.008425234314031194, 2.575498465013183),\n (0.04062510634554352, -\n 0.009678089746831375, -1.2534351452524177),\n (0.0003666902601348179, 0.01810332406086298, -0.32206331976076663)])\n\nTPL_MIN, TPL_MAX = np.min(TEMPLATE, axis=0), np.max(TEMPLATE, 
axis=0)\nMINMAX_TEMPLATE = (TEMPLATE - TPL_MIN) / (TPL_MAX - TPL_MIN)\n\n\nclass AlignDlib:\n \"\"\"\n Use `dlib's landmark estimation <http://blog.dlib.net/2014/08/real-time-face-pose-estimation.html>`_ to align faces.\n\n The alignment preprocess faces for input into a neural network.\n Faces are resized to the same size (such as 96x96) and transformed\n to make landmarks (such as the eyes and nose) appear at the same\n location on every image.\n\n Normalized landmarks:\n\n .. image:: ../images/dlib-landmark-mean.png\n \"\"\"\n\n #: Landmark indices corresponding to the inner eyes and bottom lip.\n INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]\n\n #: Landmark indices corresponding to the outer eyes and nose.\n OUTER_EYES_AND_NOSE = [36, 45, 33]\n\n def __init__(self, facePredictor):\n \"\"\"\n Instantiate an 'AlignDlib' object.\n\n :param facePredictor: The path to dlib's\n :type facePredictor: str\n \"\"\"\n assert facePredictor is not None\n\n #pylint: disable=no-member\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(facePredictor)\n\n def getAllFaceBoundingBoxes(self, rgbImg):\n \"\"\"\n Find all face bounding boxes in an image.\n\n :param rgbImg: RGB image to process. Shape: (height, width, 3)\n :type rgbImg: numpy.ndarray\n :return: All face bounding boxes in an image.\n :rtype: dlib.rectangles\n \"\"\"\n assert rgbImg is not None\n\n try:\n return self.detector(rgbImg, 1)\n except Exception as e: # pylint: disable=broad-except\n print(\"Warning: {}\".format(e))\n # In rare cases, exceptions are thrown.\n return []\n\n def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):\n \"\"\"\n Find the largest face bounding box in an image.\n\n :param rgbImg: RGB image to process. Shape: (height, width, 3)\n :type rgbImg: numpy.ndarray\n :param skipMulti: Skip image if more than one face detected.\n :type skipMulti: bool\n :return: The largest face bounding box in an image, or None.\n :rtype: dlib.rectangle\n \"\"\"\n assert rgbImg is not None\n\n faces = self.getAllFaceBoundingBoxes(rgbImg)\n if (not skipMulti and len(faces) > 0) or len(faces) == 1:\n return max(faces, key=lambda rect: rect.width() * rect.height())\n else:\n return None\n\n def findLandmarks(self, rgbImg, bb):\n \"\"\"\n Find the landmarks of a face.\n\n :param rgbImg: RGB image to process. Shape: (height, width, 3)\n :type rgbImg: numpy.ndarray\n :param bb: Bounding box around the face to find landmarks for.\n :type bb: dlib.rectangle\n :return: Detected landmark locations.\n :rtype: list of (x,y) tuples\n \"\"\"\n assert rgbImg is not None\n assert bb is not None\n\n points = self.predictor(rgbImg, bb)\n # return list(map(lambda p: (p.x, p.y), points.parts()))\n return [(p.x, p.y) for p in points.parts()]\n\n #pylint: disable=dangerous-default-value\n def align(self, imgDim, rgbImg, bb=None,\n landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,\n skipMulti=False, scale=1.0):\n r\"\"\"align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)\n\n Transform and align a face in an image.\n\n :param imgDim: The edge length in pixels of the square the image is resized to.\n :type imgDim: int\n :param rgbImg: RGB image to process. Shape: (height, width, 3)\n :type rgbImg: numpy.ndarray\n :param bb: Bounding box around the face to align. \\\n Defaults to the largest face.\n :type bb: dlib.rectangle\n :param landmarks: Detected landmark locations. 
\\\n Landmarks found on `bb` if not provided.\n :type landmarks: list of (x,y) tuples\n :param landmarkIndices: The indices to transform to.\n :type landmarkIndices: list of ints\n :param skipMulti: Skip image if more than one face detected.\n :type skipMulti: bool\n :param scale: Scale image before cropping to the size given by imgDim.\n :type scale: float\n :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)\n :rtype: numpy.ndarray\n \"\"\"\n assert imgDim is not None\n assert rgbImg is not None\n assert landmarkIndices is not None\n\n if bb is None:\n bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)\n if bb is None:\n return\n\n if landmarks is None:\n landmarks = self.findLandmarks(rgbImg, bb)\n\n npLandmarks = np.float32(landmarks)\n npLandmarkIndices = np.array(landmarkIndices)\n\n #pylint: disable=maybe-no-member\n H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],\n imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)\n thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))\n\n return thumbnail\n" ]
[ [ "numpy.max", "numpy.array", "numpy.float32", "numpy.min" ] ]
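Editor's note: the AlignDlib docstrings above describe the intended call pattern: construct the class with a dlib landmark-predictor file, then call align with a target edge length and an RGB image. A usage sketch with placeholder file names (the predictor .dat file is downloaded separately and is an assumption here):

# Usage sketch based on the AlignDlib docstrings above; file names are placeholders.
import cv2

aligner = AlignDlib("shape_predictor_68_face_landmarks.dat")  # dlib landmark model (placeholder path)
bgr = cv2.imread("face.jpg")                                  # placeholder input image
assert bgr is not None, "image not found"
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)                    # AlignDlib expects RGB

aligned = aligner.align(96, rgb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
if aligned is not None:                                       # align returns None when no face is found
    cv2.imwrite("face_aligned.png", cv2.cvtColor(aligned, cv2.COLOR_RGB2BGR))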
oricalworks/openpilot-1
[ "aec0baa6f36a614c90ce2a757339d63b5354c4d6" ]
[ "selfdrive/controls/lib/lateral_planner.py" ]
[ "import os\nimport math\nimport numpy as np\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp, clip\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc import libmpc_py\nfrom selfdrive.controls.lib.drive_helpers import MPC_COST_LAT, MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom common.op_params import opParams\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLOG_MPC = os.environ.get('LOG_MPC', False)\n\nLANE_CHANGE_SPEED_MIN = 19 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n# this corresponds to 80deg/s and 20deg/s steering angle in a toyota corolla\nMAX_CURVATURE_RATES = [0.03762194918267951, 0.003441203371932992]\nMAX_CURVATURE_RATE_SPEEDS = [0, 35]\n\nsadBP = [0., 5., 10., 22., 25., 40.]\nsadV = [.0, .05, .1, .22, .35, .45]\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass LateralPlanner():\n def __init__(self, CP, use_lanelines=True, wide_camera=False):\n self.use_lanelines = use_lanelines\n self.LP = LanePlanner(wide_camera)\n\n self.last_cloudlog_t = 0\n self.steer_rate_cost = CP.steerRateCost\n\n self.setup_mpc()\n self.solution_invalid_cnt = 0\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n self.pre_auto_LCA_timer = 0.0\n self.auto_lca_activate_prev = False\n self.torque_opposed = False\n self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))\n self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n def setup_mpc(self):\n self.libmpc = libmpc_py.libmpc\n self.libmpc.init()\n\n self.mpc_solution = libmpc_py.ffi.new(\"log_t *\")\n self.cur_state = libmpc_py.ffi.new(\"state_t *\")\n self.cur_state[0].x = 0.0\n self.cur_state[0].y = 0.0\n self.cur_state[0].psi = 0.0\n self.cur_state[0].curvature = 0.0\n\n self.desired_curvature = 0.0\n self.safe_desired_curvature = 0.0\n self.desired_curvature_rate = 0.0\n self.safe_desired_curvature_rate = 0.0\n\n def update(self, sm, CP):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n measured_curvature = sm['controlsState'].curvature\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n if len(md.position.x) == TRAJECTORY_SIZE and 
len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n if len(md.orientation.xStd) == TRAJECTORY_SIZE:\n self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.pre_auto_LCA_timer = 0.\n else:\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n torque_opposed = (sm['carState'].steeringPressed and\n ((sm['carState'].steeringTorque < -10 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque > 10 and self.lane_change_direction == LaneChangeDirection.right)))\n\n if self.lane_change_state == LaneChangeState.preLaneChange and opParams().get('nonudgeLCA'):\n if not torque_opposed and not blindspot_detected and v_ego > opParams().get('nonudgeLCAspeed') * CV.MPH_TO_MS:\n self.pre_auto_LCA_timer += DT_MDL\n else:\n self.pre_auto_LCA_timer = -1.4\n else:\n self.pre_auto_LCA_timer = 0.\n\n torque_applied = (sm['carState'].steeringPressed and\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right)))\n\n auto_lc_activate = (1.4 > self.pre_auto_LCA_timer > .9) and not blindspot_detected\n\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n\n # State transitions\n # off\n if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n self.auto_lca_activate_prev = False\n self.torque_opposed = False\n\n # pre\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n if not one_blinker or below_lane_change_speed or torque_opposed:\n self.lane_change_state = LaneChangeState.off\n self.torque_opposed = True\n elif (torque_applied and not blindspot_detected) or (not self.auto_lca_activate_prev and auto_lc_activate):\n self.lane_change_state = LaneChangeState.laneChangeStarting\n self.auto_lca_activate_prev = True\n\n # starting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)\n # 98% certainty\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n elif torque_opposed:\n self.lane_change_state = LaneChangeState.off\n self.torque_opposed = True\n\n # finishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if one_blinker and self.lane_change_ll_prob > 0.99:\n 
self.lane_change_state = LaneChangeState.preLaneChange\n elif self.lane_change_ll_prob > 0.99 or torque_opposed:\n self.lane_change_state = LaneChangeState.off\n self.torque_opposed = True\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Turn off lanes during lane change\n if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n if self.use_lanelines:\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)\n else:\n d_path_xyz = self.path_xyz\n path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH\n # Heading cost is useful at low speed, otherwise end of plan can be off-heading\n heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])\n self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)\n y_pts = np.interp(v_ego * self.t_idxs[:MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])\n heading_pts = np.interp(v_ego * self.t_idxs[:MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n assert len(y_pts) == MPC_N + 1\n assert len(heading_pts) == MPC_N + 1\n self.libmpc.run_mpc(self.cur_state, self.mpc_solution,\n float(v_ego),\n CAR_ROTATION_RADIUS,\n list(y_pts),\n list(heading_pts))\n # init state for next\n self.cur_state.x = 0.0\n self.cur_state.y = 0.0\n self.cur_state.psi = 0.0\n self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:MPC_N + 1], self.mpc_solution.curvature)\n\n sad = interp(v_ego, sadBP, sadV)\n\n # TODO this needs more thought, use .2s extra for now to estimate other delays\n delay = CP.steerActuatorDelay + sad\n current_curvature = self.mpc_solution.curvature[0]\n psi = interp(delay, self.t_idxs[:MPC_N + 1], self.mpc_solution.psi)\n next_curvature_rate = self.mpc_solution.curvature_rate[0]\n\n # MPC can plan to turn the wheel and turn back before t_delay. This means\n # in high delay cases some corrections never even get commanded. So just use\n # psi to calculate a simple linearization of desired curvature\n curvature_diff_from_psi = psi / (max(v_ego, 1e-1) * delay) - current_curvature\n next_curvature = current_curvature + 2 * curvature_diff_from_psi\n\n self.desired_curvature = next_curvature\n self.desired_curvature_rate = next_curvature_rate\n max_curvature_rate = interp(v_ego, MAX_CURVATURE_RATE_SPEEDS, MAX_CURVATURE_RATES)\n self.safe_desired_curvature_rate = clip(self.desired_curvature_rate,\n -max_curvature_rate,\n max_curvature_rate)\n self.safe_desired_curvature = clip(self.desired_curvature,\n self.safe_desired_curvature - max_curvature_rate/DT_MDL,\n self.safe_desired_curvature + max_curvature_rate/DT_MDL)\n\n # Check for infeasable MPC solution\n mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)\n t = sec_since_boot()\n if mpc_nans:\n self.libmpc.init()\n self.cur_state.curvature = measured_curvature\n\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.mpc_solution[0].cost > 20000. 
or mpc_nans: # TODO: find a better way to detect when MPC did not converge\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('lateralPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])\n plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)\n plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.lateralPlan.lProb = float(self.LP.lll_prob)\n plan_send.lateralPlan.rProb = float(self.LP.rll_prob)\n plan_send.lateralPlan.dProb = float(self.LP.d_prob)\n\n plan_send.lateralPlan.rawCurvature = float(self.desired_curvature)\n plan_send.lateralPlan.rawCurvatureRate = float(self.desired_curvature_rate)\n plan_send.lateralPlan.curvature = float(self.safe_desired_curvature)\n plan_send.lateralPlan.curvatureRate = float(self.safe_desired_curvature_rate)\n\n plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)\n\n plan_send.lateralPlan.desire = self.desire\n plan_send.lateralPlan.laneChangeState = self.lane_change_state\n plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction\n\n pm.send('lateralPlan', plan_send)\n\n if LOG_MPC:\n dat = messaging.new_message('liveMpc')\n dat.liveMpc.x = list(self.mpc_solution.x)\n dat.liveMpc.y = list(self.mpc_solution.y)\n dat.liveMpc.psi = list(self.mpc_solution.psi)\n dat.liveMpc.curvature = list(self.mpc_solution.curvature)\n dat.liveMpc.cost = self.mpc_solution.cost\n pm.send('liveMpc', dat)\n" ]
[ [ "numpy.arange", "numpy.linalg.norm", "numpy.ones", "numpy.column_stack", "numpy.array", "numpy.zeros" ] ]
ChengZheWu/Medical-Image-Analysis
[ "83b7e34eaa9691d35ac29d52ca26656cec3e411d" ]
[ "kflod.py" ]
[ "from trainer import Trainer\nfrom preprocessing import get_dataset\nfrom os import path\nfrom torch.utils.data import DataLoader\nimport torch as T\nimport numpy as np\nimport random\nfrom sklearn import metrics\nfrom os import path\n\ndef get_metrics(target, pred):\n prec, recall, _, _ = metrics.precision_recall_fscore_support(target, pred>0.5, average='binary')\n fpr, tpr, thresholds = metrics.roc_curve(target, pred)\n auc = metrics.auc(fpr, tpr)\n return prec, recall, auc\n\n\ndef calc_accuracy(x, y):\n x_th = (x > 0.5).long()\n matches = x_th == y.long()\n return matches\n\ndef reset_rand():\n seed = 1000\n T.manual_seed(seed)\n T.cuda.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\ndef kfold(src_path,\n batch_size,\n n_epochs,\n model_optimizer,\n loss,\n name,\n device,\n deterministic=False,\n parallel=False,\n dataset_func=get_dataset):\n\n print(f'Experiment {name}')\n all_pred = T.zeros(849)\n all_targets = T.zeros(849)\n i =0\n f = open(path.join('results', f'{name}.txt'), 'w')\n f.write(f'{batch_size} {n_epochs} {model_optimizer}\\n')\n for fold in range(10):\n\n reset_rand()\n\n print(f'------------ fold {fold+1} ------------')\n f.write(f'------------ fold {fold+1} ------------\\n')\n trset, testset = dataset_func(path.join(src_path,str(fold)))\n print(f'Training Size: {len(trset)}, Validation Size: {len(testset)}')\n trset = DataLoader(trset, batch_size, shuffle=True)\n testset = DataLoader(testset, batch_size, shuffle=False)\n model,optimizer = model_optimizer()\n tr = Trainer(\n trset,\n testset,\n batch_size,\n n_epochs,\n model,\n optimizer,\n loss,\n f'{name}_{fold}',\n device,\n deterministic,\n parallel\n )\n\n tr.run()\n\n pred, target = tr.predict()\n all_pred[i:i+pred.shape[0]] = pred\n all_targets[i:i+target.shape[0]] = target\n i += target.shape[0]\n\n prec, recall, auc = get_metrics(target, pred)\n print(f'AUC: {auc:.4f}, precession: {prec:.4f}, Recall: {recall:.4f}')\n f.write(f'AUC: {auc:.4f}, precession: {prec:.4f}, Recall: {recall:.4f}\\n')\n\n del tr\n\n\n matches = calc_accuracy(all_pred, all_targets)\n acc = matches.float().mean()\n all_pred = all_pred.numpy()\n all_targets = all_targets.numpy()\n\n prec, recall, auc = get_metrics(all_targets, all_pred)\n print(f'accuray: {acc:.4f}, AUC: {auc:.4f}, precision: {prec:.4f}, Recall: {recall:.4f}')\n f.write(f'accuray: {acc}, AUC: {auc}, precision: {prec}, Recall: {recall}')\n result = {'all_pred': all_pred, 'all_targets': all_targets}\n T.save(result, path.join('results',f'{name}_result'))\n f.close()\n\n\n\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "torch.zeros", "torch.manual_seed", "torch.utils.data.DataLoader", "sklearn.metrics.roc_curve", "sklearn.metrics.precision_recall_fscore_support", "sklearn.metrics.auc" ] ]
TrueOctopus/classifierBack
[ "5ebe20b8bd18aa74bbeb229403963b331c5013c2" ]
[ "textRank/textrank4zh/util.py" ]
[ "import os\nimport math\nimport networkx as nx\nimport numpy as np\n\nsentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…', '\\n']\nallow_speech_tags = ['an', 'i', 'j', 'l', 'n', 'nr', 'nrfg', 'ns', 'nt', 'nz', 't', 't', 'vd', 'vn', 'eng']\n\ntext_type = str\nstring_types = (str,)\nxrange = range\n\n\ndef as_text(t):\n if t is None:\n return None\n elif isinstance(t, bytes):\n return t.decode('utf-8', errors='ignore')\n elif isinstance(t, str):\n return t\n else:\n raise ValueError('Unknown type %r' % type(t))\n\n\ndef is_text(t):\n return isinstance(t, text_type)\n\n\n__DEBUG = None\n\n\ndef debug(*args):\n global __DEBUG\n if __DEBUG is None:\n try:\n if os.environ['DEBUG'] == '1':\n __DEBUG = True\n else:\n __DEBUG = False\n except:\n __DEBUG = False\n if __DEBUG:\n print(' '.join([str(arg) for arg in args]))\n\n\nclass AttrDict(dict):\n \"\"\"Dict that can get attribute by dot\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\ndef combine(word_list, window=2):\n \"\"\"构造在window下的单词组合,用来构造单词之间的边。\n \n Keyword arguments:\n word_list -- list of str, 由单词组成的列表。\n windows -- int, 窗口大小。\n \"\"\"\n if window < 2: window = 2\n for x in xrange(1, window):\n if x >= len(word_list):\n break\n word_list2 = word_list[x:]\n res = zip(word_list, word_list2)\n for r in res:\n yield r\n\n\ndef get_similarity(word_list1, word_list2):\n \"\"\"默认的用于计算两个句子相似度的函数。\n\n Keyword arguments:\n word_list1, word_list2 -- 分别代表两个句子,都是由单词组成的列表\n \"\"\"\n words = list(set(word_list1 + word_list2))\n vector1 = [float(word_list1.count(word)) for word in words]\n vector2 = [float(word_list2.count(word)) for word in words]\n\n vector3 = [vector1[x] * vector2[x] for x in xrange(len(vector1))]\n vector4 = [1 for num in vector3 if num > 0.]\n co_occur_num = sum(vector4)\n\n if abs(co_occur_num) <= 1e-12:\n return 0.\n\n denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # 分母\n\n if abs(denominator) < 1e-12:\n return 0.\n\n return co_occur_num / denominator\n\n\ndef sort_words(vertex_source, edge_source, window=2, pagerank_config={'alpha': 0.85, }):\n \"\"\"将单词按关键程度从大到小排序\n\n Keyword arguments:\n vertex_source -- 二维列表,子列表代表句子,子列表的元素是单词,这些单词用来构造pagerank中的节点\n edge_source -- 二维列表,子列表代表句子,子列表的元素是单词,根据单词位置关系构造pagerank中的边\n window -- 一个句子中相邻的window个单词,两两之间认为有边\n pagerank_config -- pagerank的设置\n \"\"\"\n sorted_words = []\n word_index = {}\n index_word = {}\n _vertex_source = vertex_source\n _edge_source = edge_source\n words_number = 0\n for word_list in _vertex_source:\n for word in word_list:\n if not word in word_index:\n word_index[word] = words_number\n index_word[words_number] = word\n words_number += 1\n\n graph = np.zeros((words_number, words_number))\n\n for word_list in _edge_source:\n for w1, w2 in combine(word_list, window):\n if w1 in word_index and w2 in word_index:\n index1 = word_index[w1]\n index2 = word_index[w2]\n graph[index1][index2] = 1.0\n graph[index2][index1] = 1.0\n\n debug('graph:\\n', graph)\n\n nx_graph = nx.from_numpy_matrix(graph)\n scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict\n sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)\n for index, score in sorted_scores:\n item = AttrDict(word=index_word[index], weight=score)\n sorted_words.append(item)\n\n return sorted_words\n\n\ndef sort_sentences(sentences, words, sim_func=get_similarity, pagerank_config={'alpha': 0.85, }):\n \"\"\"将句子按照关键程度从大到小排序\n\n Keyword arguments:\n sentences -- 
列表,元素是句子\n words -- 二维列表,子列表和sentences中的句子对应,子列表由单词组成\n sim_func -- 计算两个句子的相似性,参数是两个由单词组成的列表\n pagerank_config -- pagerank的设置\n \"\"\"\n sorted_sentences = []\n _source = words\n sentences_num = len(_source)\n graph = np.zeros((sentences_num, sentences_num))\n\n for x in xrange(sentences_num):\n for y in xrange(x, sentences_num):\n similarity = sim_func(_source[x], _source[y])\n graph[x, y] = similarity\n graph[y, x] = similarity\n\n nx_graph = nx.from_numpy_matrix(graph)\n scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict\n sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)\n\n for index, score in sorted_scores:\n item = AttrDict(index=index, sentence=sentences[index], weight=score)\n sorted_sentences.append(item)\n\n return sorted_sentences\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.zeros" ] ]
istepanov/CarND-Capstone
[ "b5ead07b7a8f513208418be178c3efad70bc138b" ]
[ "ros/src/tl_detector/tl_detector.py" ]
[ "#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\n\nfrom scipy.spatial import KDTree\n\n\nSTATE_COUNT_THRESHOLD = 2\nCLASSIFY_EVERY_NTH_IMAGE = 4 # due to performance issues, classify every Nth image\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.image_cb_calls_count = -1\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n\n self.waypoints_2d = None\n self.waypoint_tree = None\n\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x,waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n Args:\n msg (Image): image from car-mounted camera\n \"\"\"\n\n # due to performance issues, classify every Nth image\n self.image_cb_calls_count += 1\n if self.image_cb_calls_count % CLASSIFY_EVERY_NTH_IMAGE == 0:\n self.image_cb_calls_count = 0\n else:\n return\n\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n self.image_cb_calls_count = -1 # state change detected, don't skip next image\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, x, y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n Returns:\n int: index of the closest waypoint in self.waypoints\n \"\"\"\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n Args:\n light (TrafficLight): light to classify\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n\n # uncomment to disable the classifier\n # return light.state\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n # Get classification\n return self.light_classifier.get_classification(cv_image)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n closest_light = None\n line_wp_idx = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if self.pose:\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n\n #TODO find the closest visible traffic light (if one exists)\n diff = len(self.waypoints.waypoints)\n for i, light in enumerate(self.lights):\n #Get the stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0],line[1])\n # Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n if d>= 0 and d< diff:\n diff = d\n closest_light = light\n line_wp_idx = temp_wp_idx\n\n if closest_light:\n state = self.get_light_state(closest_light)\n return line_wp_idx, state\n\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n" ]
[ [ "scipy.spatial.KDTree" ] ]
tommyod/paretoset
[ "1744f1ece7ac188570c35d585c364a9263cf3852" ]
[ "paretoset/algorithms_numpy.py" ]
[ "import numpy as np\nimport paretoset.user_interface # Import like this to avoid circular import issues\n\n\ndef paretoset_naive(costs, distinct=True):\n \"\"\"Naive implementation.\n\n Parameters\n ----------\n costs : (np.ndarray) Array of shape (n_costs, n_objectives).\n\n Returns\n -------\n mask : (np.ndarray) Boolean array indicating the paretoset.\n\n \"\"\"\n n_costs, n_objectives = costs.shape\n\n # Assume all points/costs are inefficient\n is_efficient = np.zeros(n_costs, dtype=np.bool_)\n\n for i in range(n_costs):\n this_cost = costs[i, :]\n\n # Here `NOT(ANY(a_i > b_i))` is equal to ALL(a_i <= b_i), but faster.\n at_least_as_good = np.logical_not(np.any(costs > this_cost, axis=1))\n any_better = np.any(costs < this_cost, axis=1)\n\n dominated_by = np.logical_and(at_least_as_good, any_better)\n\n # If we're looking for distinct points and it's already in the\n # pareto set, disregard this value.\n if distinct and np.any(is_efficient):\n if np.any(np.all(costs[is_efficient] == this_cost, axis=1)):\n continue\n\n if not (np.any(dominated_by[:i]) or np.any(dominated_by[i + 1 :])):\n is_efficient[i] = True\n\n return is_efficient\n\n\ndef paretoset_efficient(costs, distinct=True):\n \"\"\"An efficient vectorized algorhtm.\n\n This algorithm was given by Peter in this answer on Stack Overflow:\n - https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python\n \"\"\"\n costs = costs.copy() # The algorithm mutates the `costs` variable, so we take a copy\n n_costs, n_objectives = costs.shape\n\n is_efficient = np.arange(n_costs)\n\n next_point_index = 0 # Next index in the is_efficient array to search for\n while next_point_index < len(costs):\n\n this_cost = costs[next_point_index]\n\n # Two points `a` and `b` are *incomparable* if neither dom(a, b) nor dom(b, a).\n # Points that are incomparable to `this_cost`, or dominate `this_cost`.\n # In 2D, these points are below or to the left of `this_cost`.\n current_efficient_points = np.any(costs < this_cost, axis=1)\n\n # If we're not looking for distinct, keep points equal to this cost\n if not distinct:\n no_smaller = np.logical_not(current_efficient_points)\n equal_to_this_cost = np.all(costs[no_smaller] == this_cost, axis=1)\n current_efficient_points[no_smaller] = np.logical_or(\n current_efficient_points[no_smaller], equal_to_this_cost\n )\n\n # Any point is incomparable to itself, so keep this point\n current_efficient_points[next_point_index] = True\n\n # Remove dominated points\n is_efficient = is_efficient[current_efficient_points]\n costs = costs[current_efficient_points]\n\n # Re-adjust the index\n next_point_index = np.sum(current_efficient_points[:next_point_index]) + 1\n\n # Create a boolean mask from indices and return it\n is_efficient_mask = np.zeros(n_costs, dtype=np.bool_)\n is_efficient_mask[is_efficient] = True\n return is_efficient_mask\n\n\ndef pareto_rank_naive(costs, distinct=True, use_numba=True):\n \"\"\"Naive implementation of Pareto ranks.\"\"\"\n\n n_costs, n_objectives = costs.shape\n ranks = np.zeros(n_costs, dtype=np.int_)\n remaining = np.ones(n_costs, dtype=np.bool_)\n processed = np.zeros(n_costs, dtype=np.bool_)\n\n current_rank = 1\n while np.sum(remaining) > 0:\n\n # Mark the costs that have rank `i`\n frontier_mask = paretoset.user_interface.paretoset(costs[remaining], distinct=distinct, use_numba=use_numba)\n\n # Processed costs in this iteration (not processed already, and in the frontier)\n processed[np.logical_not(processed)] = frontier_mask\n\n # Set the rank of 
the processed costs that are still remaining (remaining not updated yet)\n ranks[np.logical_and(processed, remaining)] = current_rank\n\n # Update the remaining values\n remaining[remaining] = np.logical_not(frontier_mask)\n current_rank += 1\n\n return ranks\n\n\ndef crowding_distance(costs):\n \"\"\"Compute the crowding distance of (non-NaN) numerical data.\n\n The input data in `costs` must be a NumPy ndarray of shape\n (observations, objectives). The user is responsible for dealing with NaN\n values, duplicate rows and constant columns *before* calling this function.\n\n Parameters\n ----------\n costs : np.ndarray\n Numerical data of shape (observations, objectives).\n\n Returns\n -------\n distances : np.ndarray\n The crowding distance of each observation (row).\n\n Examples\n --------\n >>> import numpy as np\n >>> costs = np.array([1, 3, 5, 9, 11]).reshape(-1, 1)\n >>> crowding_distance(costs)\n array([inf, 0.4, 0.6, 0.6, inf])\n \"\"\"\n msg = \"The `costs` argument must be np.ndarray of shape (observations, objectives)\"\n if not isinstance(costs, np.ndarray):\n raise ValueError(msg)\n if not costs.ndim == 2:\n raise ValueError(msg)\n\n # =============================================================================\n # SETUP ARRAYS USED FOR COMPUTATION\n # =============================================================================\n\n # Get the shape of the inputs\n n_costs, n_objectives = costs.shape\n if n_costs <= 2:\n return np.ones(n_costs) * np.inf\n\n # Prepare the distance matrix\n distances = np.zeros_like(costs, dtype=np.float) # Must use floats to allow np.inf\n\n # Used several times below, so pre-compute it here\n arange_objectives = np.arange(n_objectives)\n\n # Used to inverse the sort\n sorted_inds_reversed = np.zeros(costs.shape, dtype=np.int)\n\n # =============================================================================\n # PERFORM THE COMPUTATION\n # =============================================================================\n\n # Sort every column individually\n sorted_inds = np.argsort(costs, axis=0)\n sorted_matrix = np.take_along_axis(costs, sorted_inds, axis=0)\n\n # Compute the diff of a_i as a_{i+1} - a_{i-1}\n diffs = sorted_matrix[2:, :] - sorted_matrix[:-2, :]\n\n # Compute max minus min for each column, using the already sorted matrix\n max_minus_min = sorted_matrix[-1, :] - sorted_matrix[0, :]\n\n # Bottom and top rows are set to infinity - these points have 1 neighbor\n infinity_row = (np.ones(n_objectives) * np.inf).reshape(1, -1)\n\n # Create the full matrix of differences\n diffs = np.vstack((infinity_row, diffs, infinity_row))\n assert diffs.shape == costs.shape\n\n # Prepare a matrix of reverse-sorted indices\n index = sorted_inds, arange_objectives\n\n # Equivalent to: np.outer(np.arange(n_costs), np.ones(n_objectives))\n sorted_inds_reversed[index] = np.tile(np.arange(n_costs), (n_objectives, 1)).T\n\n # Distances (diffs) in original data order\n distances = diffs[sorted_inds_reversed, arange_objectives]\n\n # Divide each column by MAX minus MIN to normalize\n distances = np.divide(distances, max_minus_min, out=distances)\n\n # Sum over each row, divide by number of columns and return\n return distances.sum(axis=1) / n_objectives\n\n\nif __name__ == \"__main__\":\n import pytest\n\n pytest.main(args=[\".\", \"--doctest-modules\", \"--color\", \"yes\"])\n\nif __name__ == \"__main__\":\n\n pass\n" ]
[ [ "numpy.take_along_axis", "numpy.logical_not", "numpy.arange", "numpy.divide", "numpy.ones", "numpy.all", "numpy.logical_or", "numpy.zeros_like", "numpy.any", "numpy.argsort", "numpy.logical_and", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
rafikk/data
[ "ee6de10a53922df0257c0b6e395a05cc337b0b64" ]
[ "scripts/us_census/geojsons_low_res/plotter.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlots and compares two Geojson files: before and after running simplify.py.\n\n Typical usage:\n python3 plotter.py --original_path original-data/geoId-01.geojson\n --simplified_path simplified-data/geoId-01-simple.geojson\n\"\"\"\n\nfrom absl import flags\nfrom absl import app\nimport geopandas as gpd\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n_, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)\n\n\ndef compare_plots(geojson1, geojson2, show=True):\n \"\"\"Compares geopandas objects by plotting them side by side.\n\n Args:\n geojson1: A geopandas GeoDataFrame of the first object to compare.\n geojson2: A geopandas GeoDataFrame of the second object to compare.\n show: If True, it shows the plot after it is created. If False, it does\n not automatically show, waiting for the user to explicitly call\n plt.show(). This allows multiple GeoDataFrames to be compared at\n once.\n \"\"\"\n if show:\n _, (new_ax1, new_ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)\n f1 = geojson1.plot(ax=new_ax1)\n f2 = geojson2.plot(ax=new_ax2)\n f1.set_title('Original.')\n f2.set_title('Simplified.')\n plt.show()\n else:\n f1 = geojson1.plot(ax=ax1)\n f2 = geojson2.plot(ax=ax2)\n f1.set_title('Original.')\n f2.set_title('Simplified.')\n\n\ndef main(_):\n original = gpd.read_file(FLAGS.original_path)\n simple = gpd.read_file(FLAGS.simplified_path)\n compare_plots(original, simple)\n\n\nif __name__ == '__main__':\n FLAGS = flags.FLAGS\n flags.DEFINE_string('original_path',\n default=None,\n help='Path to original geojson to be compared.')\n flags.DEFINE_string('simplified_path',\n default=None,\n help='Path to simplified geojson to be compared.')\n flags.mark_flag_as_required('original_path')\n flags.mark_flag_as_required('simplified_path')\n app.run(main)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
laloc2496/cdn_configuration_optimization
[ "58cf2278456d0ef8796570f12f1d00fd68aec686" ]
[ "nsga2.py" ]
[ "import os, sys\nos.environ['OMP_NUM_THREADS'] = '1' # speed up\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))\n\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom time import time\nimport yaml\nfrom multiprocessing import cpu_count\nfrom pymoo.algorithms.nsga2 import NSGA2\nfrom pymoo.optimize import minimize\nfrom pymoo.factory import get_performance_indicator\nfrom pymoo.util.nds.non_dominated_sorting import NonDominatedSorting\nfrom pymoo.operators.sampling.random_sampling import FloatRandomSampling\nfrom pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling\nfrom MMO.problems.common import build_problem\nfrom MMO.external import lhs\nfrom MMO.baselines.data_export import DataExport\nfrom MMO.mobo.utils import calc_hypervolume\nfrom MMO.utils import get_result_dir, setup_logger\nfrom src.util.utils import *\nimport random, string\nfrom src.net.topology import NetTopology\nfrom pymoo.factory import get_mutation, get_crossover, get_sampling\n\n\ndef get_args():\n parser = ArgumentParser()\n\n parser.add_argument('--problem', type=str, default='cdn_ram', \n help='optimization problem')\n parser.add_argument('--n-var', type=int, default=13, \n help='number of design variables')\n parser.add_argument('--n-obj', type=int, default=2, \n help='number of objectives')\n parser.add_argument('--n-init-sample', type=int, default=100, \n help='number of initial design samples')\n parser.add_argument('--n-iter', type=int, default=30, \n help='number of optimization iterations')\n parser.add_argument('--ref-point', type=float, nargs='+', default=None, \n help='reference point for calculating hypervolume')\n parser.add_argument('--batch-size', type=int, default=10, \n help='size of the evaluated batch in one iteration')\n parser.add_argument('--pop-init-method', type=str, choices=['nds', 'random', 'lhs'], default='nds', \n help='method to init population')\n \n parser.add_argument('--seed', type=int, default=0, \n help='random seed')\n parser.add_argument('--subfolder', type=str, default='default',\n help='subfolder name for storing results, directly store under result/ as default')\n parser.add_argument('--exp-name', type=str, default=None,\n help='custom experiment name to distinguish between experiments on same problem and same algorithm')\n parser.add_argument('--log-to-file', default=False, action='store_true',\n help='log output to file rather than print by stdout')\n parser.add_argument('--n-process', type=int, default=cpu_count(),\n help='number of processes to be used for parallelization')\n\n args = parser.parse_args()\n return args\n\n\ndef get_ref_point(problem, n_var=6, n_obj=2, n_init_sample=50, seed=0, n_process=cpu_count()):\n\n np.random.seed(seed)\n jsonFile = \"/home/picarib_home/cdnet/config/json/france_cdn.json\"\n configDirPath = \"/home/picarib_home/cdnet/config/france_cdn/\"\n dataPath = \"/home/picarib_home/cdnet/data/\"\n \n deleteCachePath = \"/home/picarib_home/cdn_configuration_optimization/tmp/france_cdn\" \n config = loadJSON(jsonFile)\n interval = 1 if \"custom\" not in config[\"RequestModels\"] else config[\"RequestModels\"][\"custom\"][\"interval\"]\n isLoadRTable = config[\"isLoadRTable\"]\n isLoadSeparatorRank = config[\"isLoadSeparatorRank\"]\n mode = config[\"RoutingMode\"] # [no-cache, no-color, tag-color, full-color]\n fileSize = config[\"FileSize\"]\n runReqNums = config[\"RunReqNums\"] if \"RunReqNums\" in config else -1\n warmUpReqNums = config[\"WarmUpReqNums\"] if \"WarmUpReqNums\" in 
config else -1\n colorNums = config[\"colorNums\"]\n separatorRankIncrement = config[\"separatorRankIncrement\"]\n \n colorList = [''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) for i in range(colorNums)]\n \n topo = NetTopology(config, configDirPath, mode, warmUpReqNums, fileSize, colorList)\n topo.build()\n \n extra_params = (topo, fileSize, mode, colorList, runReqNums, warmUpReqNums, separatorRankIncrement, deleteCachePath)\n _, _, Y_init = build_problem(problem, n_var, n_obj, n_init_sample, n_process, extra_params=extra_params)\n\n return np.max(Y_init, axis=0)\n\n\ndef save_args(args):\n '''\n Save arguments to yaml file\n '''\n result_dir = get_result_dir(args)\n args_path = os.path.join(result_dir, 'args.yml')\n os.makedirs(os.path.dirname(args_path), exist_ok=True)\n with open(args_path, 'w') as f:\n yaml.dump(args, f, default_flow_style=False, sort_keys=False)\n\n\ndef main():\n # get argument values\n args = get_args()\n # get reference point\n if args.ref_point is None:\n args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj, args.n_init_sample)\n\n t0 = time()\n\n # set seed\n np.random.seed(args.seed)\n\n # build problem, get initial samples\n jsonFile = \"/home/picarib_home/cdnet/config/json/france_cdn.json\"\n configDirPath = \"/home/picarib_home/cdnet/config/france_cdn/\"\n dataPath = \"/home/picarib_home/cdnet/data/\"\n deleteCachePath = \"/home/picarib_home/cdn_configuration_optimization/tmp/france_cdn\" \n \n config = loadJSON(jsonFile)\n interval = 1 if \"custom\" not in config[\"RequestModels\"] else config[\"RequestModels\"][\"custom\"][\"interval\"]\n isLoadRTable = config[\"isLoadRTable\"]\n isLoadSeparatorRank = config[\"isLoadSeparatorRank\"]\n mode = config[\"RoutingMode\"] # [no-cache, no-color, tag-color, full-color]\n fileSize = config[\"FileSize\"]\n runReqNums = config[\"RunReqNums\"] if \"RunReqNums\" in config else -1\n warmUpReqNums = config[\"WarmUpReqNums\"] if \"WarmUpReqNums\" in config else -1\n colorNums = config[\"colorNums\"]\n separatorRankIncrement = config[\"separatorRankIncrement\"]\n \n colorList = [''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) for i in range(colorNums)]\n \n topo = NetTopology(config, configDirPath, mode, warmUpReqNums, fileSize, colorList)\n topo.build()\n \n extra_params = (topo, fileSize, mode, colorList, runReqNums, warmUpReqNums, separatorRankIncrement, deleteCachePath)\n problem, X_init, Y_init = build_problem(args.problem, args.n_var, args.n_obj, args.n_init_sample, args.n_process, extra_params=extra_params)\n args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'nsga2'\n\n # save arguments and setup logger\n save_args(args)\n logger = setup_logger(args)\n print(problem)\n\n # initialize data exporter\n exporter = DataExport(X_init, Y_init, args)\n\n # initialize population\n if args.pop_init_method == 'lhs':\n sampling = LatinHypercubeSampling()\n elif args.pop_init_method == 'nds':\n sorted_indices = NonDominatedSorting().do(Y_init)\n sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]\n if len(sampling) < args.batch_size:\n rest_sampling = lhs(X_init.shape[1], args.batch_size - len(sampling))\n sampling = np.vstack([sampling, rest_sampling])\n elif args.pop_init_method == 'random':\n sampling = FloatRandomSampling()\n else:\n raise NotImplementedError\n \n \n \n # initialize evolutionary algorithm\n ea_algorithm = NSGA2(pop_size=args.batch_size, sampling=sampling, crossover=get_crossover(\"int_one_point\"),\n 
mutation=get_mutation(\"int_pm\", prob=1.0/13))\n\n # find Pareto front\n res = minimize(problem, ea_algorithm, ('n_gen', args.n_iter), save_history=True)\n X_history = np.array([algo.pop.get('X') for algo in res.history])\n Y_history = np.array([algo.pop.get('F') for algo in res.history])\n\n # update data exporter\n for X_next, Y_next in zip(X_history, Y_history):\n exporter.update(X_next, Y_next)\n\n # export all result to csv\n exporter.write_csvs()\n\n # statistics\n final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)\n print('========== Result ==========')\n print('Total runtime: %.2fs' % (time() - t0))\n print('Total evaluations: %d, hypervolume: %.4f\\n' % (args.batch_size * args.n_iter, final_hv))\n\n # close logger\n if logger is not None:\n logger.close()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.vstack", "numpy.random.seed" ] ]
microsoft/fnl_paper
[ "a93a8081dab472fb53effd26a384e28670383485" ]
[ "deficient-efficient/models/blocks.py" ]
[ "# blocks and convolution definitions\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom torch.utils.checkpoint import checkpoint, checkpoint_sequential\n\nif __name__ == 'blocks' or __name__ == '__main__':\n from hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable\n from decomposed import TensorTrain, Tucker, CP\nelse:\n from .hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable\n from .decomposed import TensorTrain, Tucker, CP\n\ndef HashedDecimate(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n # Hashed Conv2d using 1/10 the original parameters\n original_params = out_channels*in_channels*kernel_size*kernel_size // groups\n budget = original_params//10\n return HashedConv2d(in_channels, out_channels, kernel_size, budget,\n stride=stride, padding=padding, dilation=dilation, groups=groups,\n bias=bias)\n\ndef SepHashedDecimate(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n # Hashed Conv2d using 1/10 the original parameters\n assert groups == 1\n original_params = out_channels*in_channels*kernel_size*kernel_size\n budget = original_params//10\n conv = HalfHashedSeparable(in_channels, out_channels, kernel_size,\n budget, stride=stride, padding=padding, dilation=dilation,\n groups=groups, bias=bias)\n n_params = sum([p.numel() for p in conv.parameters()])\n budget = budget + conv.hashed.bias.numel()\n assert n_params <= budget, f\"{n_params} > {budget}\"\n return conv\n\n\ntry:\n from pytorch_acdc.layers import FastStackedConvACDC\nexcept ImportError:\n print(\"Failed to import ACDC\")\n\n\ndef ACDC(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,\n stride=stride, padding=padding, dilation=dilation, groups=groups,\n bias=bias)\n\ndef OriginalACDC(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return FastStackedConvACDC(in_channels, out_channels, kernel_size, 12,\n stride=stride, padding=padding, dilation=dilation, groups=groups,\n bias=bias, original=True)\n\n\nclass GenericLowRank(nn.Module):\n \"\"\"A generic low rank layer implemented with a linear bottleneck, using two\n Conv2ds in sequence. Preceded by a depthwise grouped convolution in keeping\n with the other low-rank layers here.\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n assert groups == 1\n super(GenericLowRank, self).__init__()\n if kernel_size > 1:\n self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,\n stride=stride, padding=padding, dilation=dilation,\n groups=in_channels, bias=False)\n self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, bias=False)\n self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)\n else:\n self.grouped = None\n self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, stride=stride,\n dilation=dilation, bias=False)\n self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)\n\n def forward(self, x):\n if self.grouped is not None:\n x = self.grouped(x)\n x = self.lowrank_contract(x)\n return self.lowrank_expand(x)\n\n\nclass LowRank(nn.Module):\n \"\"\"A generic low rank layer implemented with a linear bottleneck, using two\n Conv2ds in sequence. 
Preceded by a depthwise grouped convolution in keeping\n with the other low-rank layers here.\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n assert groups == 1\n assert out_channels%in_channels == 0\n self.upsample = out_channels//in_channels\n super(LowRank, self).__init__()\n if kernel_size > 1:\n self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,\n stride=stride, padding=padding, dilation=dilation,\n groups=in_channels, bias=False)\n self.lowrank = nn.Conv2d(self.upsample*in_channels, rank, 1,\n bias=bias)\n else:\n self.grouped = None\n self.lowrank = nn.Conv2d(self.upsample*in_channels, rank, 1,\n stride=stride, dilation=dilation, bias=bias)\n\n def forward(self, x):\n if self.grouped is not None:\n x = self.grouped(x)\n if self.upsample > 1:\n x = x.repeat(1,self.upsample,1,1)\n x = F.conv2d(x, self.lowrank.weight, None, self.lowrank.stride,\n self.lowrank.padding, self.lowrank.dilation,\n self.lowrank.groups)\n return F.conv2d(x, self.lowrank.weight.permute(1,0,2,3),\n self.lowrank.bias)\n\n\n\n# from: https://github.com/kuangliu/pytorch-cifar/blob/master/models/shufflenet.py#L10-L19\nclass ShuffleBlock(nn.Module):\n def __init__(self, groups):\n super(ShuffleBlock, self).__init__()\n self.groups = groups\n\n def forward(self, x):\n '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''\n N,C,H,W = x.size()\n g = self.groups\n return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)\n\n\nclass LinearShuffleNet(nn.Module):\n \"\"\"Linear version of the ShuffleNet block, minus the shortcut connection,\n as we assume relevant shortcuts already exist in the network having a\n substitution. When linear, this can be viewed as a low-rank tensor\n decomposition.\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, shuffle_groups,\n stride=1, padding=0, dilation=1, groups=1, bias=False):\n assert groups == 1\n super(LinearShuffleNet, self).__init__()\n # why 4? 
https://github.com/jaxony/ShuffleNet/blob/master/model.py#L67\n bottleneck_channels = out_channels // 4\n self.shuffle_gconv1 = nn.Conv2d(in_channels, bottleneck_channels, 1,\n groups=shuffle_groups, bias=False)\n self.shuffle = ShuffleBlock(shuffle_groups)\n self.shuffle_dwconv = nn.Conv2d(bottleneck_channels, bottleneck_channels,\n kernel_size, stride=stride, padding=padding, dilation=dilation,\n groups=bottleneck_channels, bias=False)\n self.shuffle_gconv2 = nn.Conv2d(bottleneck_channels, out_channels, 1,\n groups=shuffle_groups, bias=bias)\n\n def forward(self, x):\n x = self.shuffle_gconv1(x)\n x = self.shuffle(x)\n x = self.shuffle_dwconv(x)\n return self.shuffle_gconv2(x)\n\n\ndef cant_be_shuffled(shuffle_groups, in_channels, out_channels):\n # utility function, true if we can't instance shufflenet block using this\n divides_in = in_channels%shuffle_groups == 0\n divides_out = out_channels%shuffle_groups == 0\n divides_bottleneck = (out_channels//4)%shuffle_groups == 0\n return not (divides_in and divides_out and divides_bottleneck)\n\n\nclass DepthwiseSep(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n\n super(DepthwiseSep, self).__init__()\n assert groups == 1\n if kernel_size > 1:\n self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,\n stride=stride, padding=padding, dilation=dilation,\n groups=in_channels, bias=False)\n self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)\n else:\n self.pointwise = nn.Conv2d(in_channels, out_channels, 1,\n stride=stride, padding=padding, dilation=dilation,\n bias=bias)\n\n def forward(self, x):\n if hasattr(self, 'grouped'):\n out = self.grouped(x)\n else:\n out = x\n return self.pointwise(out)\n\n\nclass Conv(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=1,\n dilation=1, bias=False):\n super(Conv, self).__init__()\n # Dumb normal conv incorporated into a class\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=bias, dilation=dilation)\n\n def forward(self, x):\n return self.conv(x)\n\n\ndef conv_function(convtype):\n \n # if convtype contains an underscore, it must have a hyperparam in it\n if \"_\" in convtype:\n convtype, hyperparam = convtype.split(\"_\")\n if convtype == 'ACDC':\n # then hyperparam controls how many layers in each conv\n n_layers = int(round(float(hyperparam)))\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return FastStackedConvACDC(in_channels, out_channels,\n kernel_size, n_layers, stride=stride,\n padding=padding, dilation=dilation, groups=groups,\n bias=bias)\n elif convtype == 'Hashed':\n # then hyperparam controls relative budget for each layer\n budget_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n # Hashed Conv2d using 1/10 the original parameters\n original_params = out_channels*in_channels*kernel_size*kernel_size // groups\n budget = int(original_params*budget_scale)\n return HashedConv2d(in_channels, out_channels, kernel_size,\n budget, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'SepHashed':\n # then hyperparam controls relative budget for each layer\n budget_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n 
original_params = out_channels*in_channels // groups\n budget = int(original_params*budget_scale)\n if kernel_size > 1: # budget for a grouped convolution\n budget += in_channels*kernel_size*kernel_size\n return HalfHashedSeparable(in_channels, out_channels, kernel_size,\n budget, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'Generic':\n rank_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n full_rank = max(in_channels,out_channels)\n rank = int(rank_scale*full_rank)\n return GenericLowRank(in_channels, out_channels, kernel_size,\n rank, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'LR':\n rank_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n full_rank = max(in_channels,out_channels)\n rank = int(rank_scale*full_rank)\n return LowRank(in_channels, out_channels, kernel_size,\n rank, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'TensorTrain':\n rank_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return TensorTrain(in_channels, out_channels, kernel_size,\n rank_scale, 3, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'Tucker':\n rank_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return Tucker(in_channels, out_channels, kernel_size,\n rank_scale, 3, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'CP':\n assert False, \"Deprecated\"\n rank_scale = float(hyperparam)\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n return CP(in_channels, out_channels, kernel_size,\n rank_scale, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n elif convtype == 'Shuffle':\n def conv(in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False):\n shuffle_groups = int(hyperparam)\n while cant_be_shuffled(shuffle_groups, in_channels, out_channels):\n shuffle_groups += -1\n return LinearShuffleNet(in_channels, out_channels, kernel_size,\n shuffle_groups, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n else:\n if convtype == 'Conv':\n conv = Conv\n elif convtype =='ACDC':\n conv = ACDC\n elif convtype =='OriginalACDC':\n conv = OriginalACDC\n elif convtype == 'HashedDecimate':\n conv = HashedDecimate\n elif convtype == 'SepHashedDecimate':\n conv = SepHashedDecimate\n elif convtype == 'Sep':\n conv = DepthwiseSep\n else:\n raise ValueError('Conv \"%s\" not recognised'%convtype)\n return conv\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0, conv=Conv):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = conv(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = conv(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n #assert self.conv2.grouped.padding[0] == 1\n self.droprate = dropRate\n 
self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\n# modified from torchvision\nclass Bottleneck(nn.Module):\n \"\"\"Bottleneck architecture block for ResNet\"\"\"\n def __init__(self, inplanes, planes, ConvClass, stride=1, downsample=None, expansion=4):\n super(Bottleneck, self).__init__()\n self.expansion = expansion\n pointwise = lambda i,o: ConvClass(i, o, kernel_size=1, padding=0,\n bias=False)\n self.conv1 = pointwise(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = ConvClass(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = pointwise(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n \n def add_residual(self, x, out):\n if self.downsample is not None:\n residual = self.downsample(x)\n else:\n residual = x\n return out + residual\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n #out = checkpoint(self.add_residual, x, out)\n out = self.add_residual(x, out)\n out = self.relu(out)\n\n return out\n\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, conv = Conv):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate, conv)\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, conv):\n layers = []\n for i in range(nb_layers):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate, conv))\n return nn.Sequential(*layers)\n def forward(self, x):\n return self.layer(x)\n\nif __name__ == '__main__':\n X = torch.randn(5,16,32,32)\n # sanity of generic low-rank layer\n generic = GenericLowRank(16, 32, 3, 2)\n for n,p in generic.named_parameters():\n print(n, p.size(), p.numel())\n out = generic(X)\n print(out.size())\n low = LowRank(16, 32, 3, 2)\n for n, p in low.named_parameters():\n print(n, p.size(), p.numel())\n out = low(X)\n print(out.size())\n assert False\n # check we don't initialise a grouped conv when not required\n layers_to_test = [LowRank(3,32,1,1), GenericLowRank(3,32,1,1),\n HalfHashedSeparable(3,32,1,10), TensorTrain(3,32,1,0.5,3),\n Tucker(3,32,1,0.5,3), CP(3,32,1,0.5,3), ACDC(3,32,1)]\n for layer in layers_to_test:\n assert getattr(layer, 'grouped', None) is None\n # and we *do* when it is required\n layers_to_test = [LowRank(3,32,3,1), GenericLowRank(3,32,3,1),\n HalfHashedSeparable(3,32,3,100), TensorTrain(3,32,3,0.5,3),\n Tucker(3,32,3,0.5,3), CP(3,32,3,0.5,3), ACDC(3,32,3)]\n for layer in layers_to_test:\n assert getattr(layer, 'grouped', None) is not None, layer\n # sanity of LinearShuffleNet\n X = torch.randn(5,16,32,32)\n shuffle = LinearShuffleNet(16,32,3,4)\n 
print(shuffle(X).size())\n" ]
[ [ "torch.nn.Sequential", "torch.nn.functional.dropout", "torch.randn", "torch.nn.functional.conv2d", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
maglab-uconn/EARShot_TF2
[ "43fcc570b78a80b245d745aebc19f341c7e283fc" ]
[ "earshot-tf2/patterns.py" ]
[ "import os, io, librosa, pickle, json\nimport numpy as np\nfrom random import sample\nimport concurrent.futures\n\nfrom .audio import *\n\nclass PatternGenerator(object):\n '''\n Creates spectrogam/semantic vector pairs, using the parameters in pattern_params,\n and writes them to the directory specifed in the same parameters object.\n\n INPUT:\n pattern_params : parameters.PatternParameters object, required\n see parameters.PatternParameters for information about the various\n training/test pattern option/settings\n '''\n def __init__(self,pattern_params):\n self.pattern_params = pattern_params\n # pronounciation dictionary\n self.assemble_wordlist()\n # creates/reads in/massages semantic vectors\n self.assemble_semantic_vectors()\n # parsing of wav files to get word,talker,pronunciation,file tuples\n self.assemble_wavfile_list()\n # sets (or creates) the directory where the patterns will be dumped\n os.makedirs(self.pattern_params.pattern_path, exist_ok= True)\n\n\n def assemble_wordlist(self):\n '''\n Reads the pronounciation file and creates a word:list of phonemes\n dictionary.\n '''\n self.pron_dict = {}\n with open(self.pattern_params.lexicon, 'r') as f:\n lines = f.readlines()\n for line in lines:\n atoms = line.strip().split()\n self.pron_dict[atoms[0].upper()] = atoms[1].split('.')\n\n\n def assemble_semantic_vectors(self):\n '''\n Creates sparse random vectors (SRVs) for every word in the wordlist,\n or fetches pre-computed vector embeddings (from word2vec, LSI, or whatnot)\n and associates them with the words in the pronounciation dict.\n\n Words in the precomputed vector dictionary are assumed to be strictly lowercase.\n\n If the precomputed vector file is missing words in the model, the function\n raises an error, rather than inserting a random vector of some sort.\n '''\n self.semantic_patterns = {}\n # create new SRVs with the parameters specified in the options class\n if self.pattern_params.semantic_mode == 'SRV':\n # first bit just makes a vector of the desired size with the first num_nonzero\n # elements equal to 1\n master_vec = np.zeros(self.pattern_params.semantic_dict['embedding_dim'])\n master_vec[:self.pattern_params.semantic_dict['num_nonzero']] = 1.0\n # assign a random permutation of the master_vec to each word\n for word in self.pron_dict.keys():\n self.semantic_patterns[word] = np.random.permutation(master_vec)\n # fetch word vectors/output patterns from specified location\n elif self.pattern_params.semantic_mode == 'VEC':\n precomputed_vectors = pickle.load(open(self.pattern_params.semantic_dict['vec_file'],'rb'))\n for word in self.pron_dict.keys():\n # rather than inserting a default value, if we don't have a word vector for\n # a word in the model, we just raise an error and fail\n if word.lower() in precomputed_vectors:\n self.semantic_patterns[word] = precomputed_vectors[word.lower()]\n else:\n raise ValueError('word {} not found in precomputed vectors'.format(word))\n\n\n def assemble_wavfile_list(self):\n '''\n EARSHOT wav files all currently have the format WORD_TALKER.WAV. 
This\n function recursively walks through the wav_path (there may be talker\n subdirectories) and extracts word/talker pairs to bind to semantic\n vectors.\n '''\n self.wav_files = []\n for root,_,files in os.walk(self.pattern_params.wav_path):\n for file in files:\n base_name,ext = os.path.splitext(file.upper())\n # enforce the rigid file format WORD_TALKER.{wav,WAV}\n if len(base_name.split('_')) != 2 or ext not in ['.wav','.WAV']:\n continue\n word, talker = base_name.split('_')\n # don't bother with words not in the pronunciation dictionary\n if word not in self.pron_dict:\n continue\n self.wav_files.append((word,self.pron_dict[word],talker,os.path.join(root,file).replace('\\\\', '/')))\n\n\n def generate_patterns(self,max_workers=10):\n '''\n Simple wrapper that allows multithreaded pattern generation. (N.B: Wasn't sure\n of any other clean way to move this out of main - KB)\n\n I've added proper exception handling from the executor calls; without this,\n the function silently succeeds no matter what happens because exceptions in the\n child processes are never caught.\n '''\n future_to_item = {}\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n for word,pron,talker,wav_file in self.wav_files:\n future_to_item[executor.submit(self.generate,word,pron,talker,wav_file)] = (word,pron,talker,wav_file)\n for future in concurrent.futures.as_completed(future_to_item):\n word,pron,talker,wav_file = future_to_item[future]\n try:\n data = future.result()\n except:\n print('{},{} pair generated an exception.'.format(word,talker))\n else:\n print('{}:{} pair ({}) processed.'.format(word,talker,wav_file))\n\n\n def generate(self,word,pron,talker,wav_file):\n '''\n Generates a single talker/word spectrogram/semantic pattern example.\n This function is called many times (once per eacn word/talker combo)\n by generate_patterns.\n\n While you *could* use this to generate a specific single pattern,\n it's not recommended. It mostly exists to wrap multithreaded execution.\n\n N.B: Key names in the pattern_dict are set to keys that Model.py will\n currently recognize; when we change these we have to be careful.\n '''\n pattern_dict = {'Word':word,'Pronunciation':pron,'Identifier':talker}\n # read/trim from the wav_file\n sig = librosa.core.load(wav_file,sr=self.pattern_params.spectrogram_dict['samp_rate'])[0]\n sig = librosa.effects.trim(sig,frame_length=32,hop_length=16)[0]\n # this block if we are using spectrograms\n if self.pattern_params.acoustic_mode == 'spectrogram':\n spec = spectrogram(sig,**self.pattern_params.spectrogram_dict)\n pattern_dict['Acoustic'] = np.transpose(spec).astype(np.float32)\n # using MEL frequency cepstral coefficients instead\n elif self.pattern_params.acoustic_mode == 'mel':\n spec = mel_spectrogram(sig,**self.pattern_params.mel_dict)\n else:\n raise ValueError('{} is not a valid choice for acoustic_mode.'.format(self.pattern_params.acoustic_mode == 'mel'))\n pattern_dict['Acoustic'] = np.transpose(spec).astype(np.float32)\n # add the semantic pattern\n pattern_dict['Semantic'] = self.semantic_patterns[word].astype(np.float32)\n pattern_filename = os.path.split(wav_file)[1].replace(os.path.splitext(wav_file)[1], '.pickle').upper()\n # N.B. 
- why are we fixing the protocol here and not using -1?\n with open(os.path.join(self.pattern_params.pattern_path, pattern_filename).replace('\\\\', '/'), 'wb') as f:\n pickle.dump(pattern_dict, f, protocol= 4)\n # status message\n #print('{}\\t->\\t{}'.format(wav_file, pattern_filename))\n # this is just to catch as exception in the thread pool executor?\n #return 1\n\n\n def generate_metadata(self):\n '''\n Generates the metadata file. I'm not actually sure what this is even used for or where?\n (Presumably I'll find out at some point.) Plus, why aren't we logging this information\n when we create it? This should go away or be drastically modified - right now it's\n basically just a copy of Heejo's original code.\n\n Also, this assumes a very rigid pattern format: WORD_TALKER.PICKLE\n '''\n metadata_dict = {'parameters':self.pattern_params}\n # word : pronunciation\n metadata_dict['Pronunciation_Dict'] = self.pron_dict\n # (word,talker) : pattern_filename\n metadata_dict['Pattern_Path_Dict'] = {}\n # this is just a reverse of metadata_dict['Pattern_Path_Dict']\n metadata_dict['Word_and_Identifier_Dict'] = {}\n # pattern_filename : pattern_step\n metadata_dict['Step_Dict'] = {}\n for root,_,files in os.walk(self.pattern_params.pattern_path):\n for file in files:\n # right now, pattern files have a very rigid format: WORD_TALKER.PICKLE. Ignore\n # any files not matching this pattern\n base_name,ext = os.path.splitext(file)\n if ext != '.PICKLE' or len(base_name.split('_')) != 2:\n continue\n # file matches rigid pattern, carry on.\n with open(os.path.join(root, file).replace('\\\\', '/'), 'rb') as f:\n pattern_dict = pickle.load(f)\n if not all([(key in pattern_dict.keys()) for key in ['Word', 'Identifier', 'Acoustic']]):\n print('File \\'{}\\' is not a valid pattern. This file is ignored.'.format(file))\n continue\n\n metadata_dict['Pattern_Path_Dict'][pattern_dict['Word'], pattern_dict['Identifier']] = file\n metadata_dict['Word_and_Identifier_Dict'][file] = (pattern_dict['Word'], pattern_dict['Identifier'])\n metadata_dict['Step_Dict'][file] = pattern_dict['Acoustic'].shape[0]\n\n metadata_dict['Target_Dict'] = {word: self.semantic_patterns[word] for word in self.pron_dict.keys()}\n\n with open(os.path.join(self.pattern_params.pattern_path, 'METADATA.PICKLE').replace('\\\\', '/'), 'wb') as f:\n pickle.dump(metadata_dict, f, protocol= 4)\n\n\n\n def generate_metadata_subset(self,word_list=None,talker_list=None,metadata_filename='METADATA.SUBSET.PICKLE'):\n '''\n I personally haven't been excluding any talkers, etc. 
so I don't need this to be super functional right now.\n It's just a pretty close copy of Heejo's function for now, to be tested/modified later.\n '''\n if word_list is not None:\n word_list = [x.upper() for x in word_list]\n if talker_list is not None:\n talker_list = [x.upper() for x in talker_list]\n\n with open(os.path.join(self.pattern_params.pattern_path, 'METADATA.PICKLE').replace('\\\\', '/'), 'rb') as f:\n metadata_dict = pickle.load(f)\n\n sub_metadata_dict = {'parameters':self.pattern_params,'Pronunciation_Dict':self.pron_dict}\n\n # filter on words\n if word_list is not None:\n word_filtered_pattern_files = [pattern_file for pattern_file, (word, talker) in metadata_dict['Word_and_Identifier_Dict'].items() if word in word_list]\n else:\n word_filtered_pattern_files = [pattern_file for pattern_file, (word, talker) in metadata_dict['Word_and_Identifier_Dict'].items()]\n # filter on talkers\n if talker_list is not None:\n talker_filtered_pattern_files = [pattern_file for pattern_file, (word, talker) in metadata_dict['Word_and_Identifier_Dict'].items() if talker in talker_list]\n else:\n talker_filtered_pattern_files = [pattern_file for pattern_file, (word, talker) in metadata_dict['Word_and_Identifier_Dict'].items()]\n\n # get a list of patterns in both filtered lists\n wordtalker_union = []\n for p in word_filtered_pattern_files:\n if p in talker_filtered_pattern_files:\n wordtalker_union.append(p)\n\n sub_metadata_dict['Pattern_Path_Dict'] = {(word,talker): pattern_file for (word,talker), pattern_file in metadata_dict['Pattern_Path_Dict'].items() if pattern_file in wordtalker_union}\n # the word and identifier dict is just a reverse of the pattern path dict\n for k,v in sub_metadata_dict['Pattern_Path_Dict']:\n sub_metadata_dict['Word_and_Identifier_Dict'][v] = k\n\n sub_metadata_dict['Step_Dict'] = {pattern_file:step for pattern_file, step in metadata_dict['Step_Dict'].items() if pattern_file in wordtalker_union}\n\n if word_list is not None:\n sub_metadata_dict['Target_Dict'] = {word:target_pattern for word, target_pattern in metadata_dict['Target_Dict'].items() if word in word_list}\n else:\n sub_metadata_dict['Target_Dict'] = metadata_dict['Target_Dict']\n\n with open(os.path.join(self.pattern_params.pattern_path, metadata_filename).replace('\\\\', '/'), 'wb') as f:\n pickle.dump(sub_metadata_dict, f, protocol= 4)\n" ]
[ [ "numpy.random.permutation", "numpy.zeros", "numpy.transpose" ] ]
jimgoo/Merlion
[ "2239f4ba6fc4fc08b3f88be842908851ee17ddbf" ]
[ "ts_datasets/ts_datasets/forecast/ECL.py" ]
[ "#\n# Copyright (c) 2021 salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n#\nimport glob\nimport logging\nimport os\n\nimport pandas as pd\n\nfrom ts_datasets.base import BaseDataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass ECL(BaseDataset):\n \"\"\"\n ECL (Electricity Consuming Load)3: It collects the electricity consumption (Kwh)\n of 321 clients. Due to the missing data (Li et al. 2019), we convert the dataset\n into hourly consumption of 2 years and set `MT 320` as the target value.\n\n - source: https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014\n \"\"\"\n\n def __init__(self, rootdir=None):\n \"\"\"\n :param rootdir: The root directory at which the dataset can be found.\n \"\"\"\n super().__init__()\n\n fnames = [\"https://jgoode.s3.amazonaws.com/ts-datasets/ECL.csv\"]\n\n start_timestamp = \"2012-01-01 00:00:00\"\n\n for i, fn in enumerate(sorted(fnames)):\n df = pd.read_csv(fn, index_col=\"date\", parse_dates=True)\n df = df[df.index >= start_timestamp]\n # put the target at the beginning\n df = df.loc[\n :,\n ['MT_320'] + df.columns.difference(['MT_320']).tolist()\n ]\n\n df.index.rename(\"timestamp\", inplace=True)\n assert isinstance(df.index, pd.DatetimeIndex)\n df.sort_index(inplace=True)\n\n self.time_series.append(df)\n self.metadata.append(\n {\n # punt on this for now\n \"trainval\": pd.Series(df.index <= start_timestamp, index=df.index),\n \"start_timestamp\": start_timestamp,\n }\n )\n" ]
[ [ "pandas.read_csv", "pandas.Series" ] ]
Kitware/netharn
[ "9ebc8ddb33c56fe890684f3a0a6369c52ebe4742" ]
[ "netharn/data/collate.py" ]
[ "\"\"\"\nFIXME 0 dimension tensors\n\"\"\"\nimport torch.utils.data as torch_data\nimport torch\nimport ubelt as ub\nimport numpy as np # NOQA\nimport re\n\n# if six.PY2:\n# import collections\n# container_abcs = collections\n# elif six.PY3:\n# import collections.abc\n# container_abcs = collections.abc\ntry:\n import collections.abc as container_abcs\n from six import string_types as string_classes\n from six import integer_types as int_classes\nexcept Exception:\n from torch._six import container_abcs\n from torch._six import string_classes, int_classes\n\ndefault_collate = torch_data.dataloader.default_collate\n\n\n# numpy_type_map = torch_data.dataloader.numpy_type_map # moved in torch 1.1.0\nnumpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\nclass CollateException(Exception):\n pass\n\n\n_DEBUG = False\n\n\ndef _collate_else(batch, collate_func):\n \"\"\"\n Handles recursion in the else case for these special collate functions\n\n This is duplicates all non-tensor cases from `torch_data.dataloader.default_collate`\n This also contains support for collating slices.\n \"\"\"\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], slice):\n batch = default_collate([{\n 'start': sl.start,\n 'stop': sl.stop,\n 'step': 1 if sl.step is None else sl.step\n } for sl in batch])\n return batch\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n # Hack the mapping collation implementation to print error info\n if _DEBUG:\n collated = {}\n try:\n for key in batch[0]:\n collated[key] = collate_func([d[key] for d in batch])\n except Exception:\n print('\\n!!Error collating key = {!r}\\n'.format(key))\n raise\n return collated\n else:\n return {key: collate_func([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [collate_func(samples) for samples in transposed]\n else:\n raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef list_collate(inbatch):\n \"\"\"\n Collates batches containing items with non-uniform data sizes\n\n Used for detection datasets with boxes.\n\n Args:\n inbatch: a list of items returned by __getitem__ for each item in the\n batch\n\n Example:\n >>> from netharn.data.collate import *\n >>> import torch\n >>> rng = np.random.RandomState(0)\n >>> inbatch = []\n >>> bsize = 4\n >>> for i in range(bsize):\n 
>>> # add an image and some dummy bboxes to the batch\n >>> img = torch.rand(3, 4, 4) # dummy 4x4 image\n >>> boxes = torch.LongTensor([[0, 0, 1, 1]] * i)\n >>> item = (img, boxes)\n >>> inbatch.append(item)\n >>> out_batch = list_collate(inbatch)\n >>> assert len(out_batch) == 2\n >>> batch_img, batch_boxes = out_batch\n >>> assert list(out_batch[0].shape) == [bsize, 3, 4, 4]\n >>> assert len(out_batch[1]) == bsize\n >>> assert len(out_batch[1][0]) == 0\n >>> assert len(out_batch[1][1]) == 1\n >>> assert len(out_batch[1][2]) == 2\n\n Example:\n >>> import torch\n >>> rng = np.random.RandomState(0)\n >>> inbatch = []\n >>> bsize = 4\n >>> for _ in range(bsize):\n >>> # add an image and some dummy bboxes to the batch\n >>> img = torch.rand(3, 8, 8) # dummy 8x8 image\n >>> boxes = torch.FloatTensor()\n >>> item = (img, [boxes])\n >>> inbatch.append(item)\n >>> out_batch = list_collate(inbatch)\n >>> assert len(out_batch) == 2\n >>> assert list(out_batch[0].shape) == [bsize, 3, 8, 8]\n >>> assert len(out_batch[1][0]) == bsize\n \"\"\"\n try:\n if torch.is_tensor(inbatch[0]):\n num_items = [len(item) for item in inbatch]\n if ub.allsame(num_items):\n if len(num_items) == 0 or num_items[0] == 0:\n batch = inbatch\n else:\n batch = default_collate(inbatch)\n else:\n batch = inbatch\n else:\n batch = _collate_else(inbatch, list_collate)\n except Exception as ex:\n if not isinstance(ex, CollateException):\n raise CollateException(\n 'Failed to collate inbatch={}. Reason: {!r}'.format(inbatch, ex))\n else:\n raise\n return batch\n\n\ndef padded_collate(inbatch, fill_value=-1):\n \"\"\"\n Used for detection datasets with boxes.\n\n Example:\n >>> from netharn.data.collate import *\n >>> import torch\n >>> rng = np.random.RandomState(0)\n >>> inbatch = []\n >>> bsize = 7\n >>> for i in range(bsize):\n >>> # add an image and some dummy bboxes to the batch\n >>> img = torch.rand(3, 8, 8) # dummy 8x8 image\n >>> n = 11 if i == 3 else rng.randint(0, 11)\n >>> boxes = torch.rand(n, 4)\n >>> item = (img, boxes)\n >>> inbatch.append(item)\n >>> out_batch = padded_collate(inbatch)\n >>> assert len(out_batch) == 2\n >>> assert list(out_batch[0].shape) == [bsize, 3, 8, 8]\n >>> assert list(out_batch[1].shape) == [bsize, 11, 4]\n\n Example:\n >>> import torch\n >>> rng = np.random.RandomState(0)\n >>> inbatch = []\n >>> bsize = 4\n >>> for _ in range(bsize):\n >>> # add an image and some dummy bboxes to the batch\n >>> img = torch.rand(3, 8, 8) # dummy 8x8 image\n >>> #boxes = torch.empty(0, 4)\n >>> boxes = torch.FloatTensor()\n >>> item = (img, [boxes])\n >>> inbatch.append(item)\n >>> out_batch = padded_collate(inbatch)\n >>> assert len(out_batch) == 2\n >>> assert list(out_batch[0].shape) == [bsize, 3, 8, 8]\n >>> #assert list(out_batch[1][0].shape) == [bsize, 0, 4]\n >>> assert list(out_batch[1][0].shape) in [[0], []] # torch .3 a .4\n\n Example:\n >>> inbatch = [torch.rand(4, 4), torch.rand(8, 4),\n >>> torch.rand(0, 4), torch.rand(3, 4),\n >>> torch.rand(0, 4), torch.rand(1, 4)]\n >>> out_batch = padded_collate(inbatch)\n >>> assert list(out_batch.shape) == [6, 8, 4]\n \"\"\"\n try:\n if torch.is_tensor(inbatch[0]):\n num_items = [len(item) for item in inbatch]\n if ub.allsame(num_items):\n if len(num_items) == 0:\n batch = torch.FloatTensor()\n elif num_items[0] == 0:\n batch = torch.FloatTensor()\n else:\n batch = default_collate(inbatch)\n else:\n max_size = max(num_items)\n real_tail_shape = None\n for item in inbatch:\n if item.numel():\n tail_shape = item.shape[1:]\n if real_tail_shape is not 
None:\n assert real_tail_shape == tail_shape\n real_tail_shape = tail_shape\n\n padded_inbatch = []\n for item in inbatch:\n n_extra = max_size - len(item)\n if n_extra > 0:\n shape = (n_extra,) + tuple(real_tail_shape)\n if torch.__version__.startswith('0.3'):\n extra = torch.Tensor(np.full(shape, fill_value=fill_value))\n else:\n extra = torch.full(shape, fill_value=fill_value,\n dtype=item.dtype)\n padded_item = torch.cat([item, extra], dim=0)\n padded_inbatch.append(padded_item)\n else:\n padded_inbatch.append(item)\n batch = inbatch\n batch = default_collate(padded_inbatch)\n else:\n batch = _collate_else(inbatch, padded_collate)\n except Exception as ex:\n if not isinstance(ex, CollateException):\n try:\n _debug_inbatch_shapes(inbatch)\n except Exception:\n pass\n raise CollateException(\n 'Failed to collate inbatch={}. Reason: {!r}'.format(inbatch, ex))\n else:\n raise\n return batch\n\n\ndef _debug_inbatch_shapes(inbatch):\n import ubelt as ub\n print('len(inbatch) = {}'.format(len(inbatch)))\n extensions = ub.util_format.FormatterExtensions()\n\n @extensions.register((torch.Tensor, np.ndarray))\n def format_shape(data, **kwargs):\n return ub.repr2(dict(type=str(type(data)), shape=data.shape), nl=1, sv=1)\n\n print('inbatch = ' + ub.repr2(inbatch, extensions=extensions, nl=True))\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m netharn.data.collate all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n" ]
[ [ "torch.LongTensor", "torch.full", "torch.cat", "torch.from_numpy", "torch.is_tensor", "numpy.full", "torch.FloatTensor", "torch.__version__.startswith", "torch.DoubleTensor" ] ]
wheeltune/kid-neuro
[ "131ec888e4f0c3ee1d7b4c4ebf57a6b1d5323d8a" ]
[ "learner/generator.py" ]
[ "import pytorch_lightning as pl\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = [\"KeystrokesGeneratorLearner\"]\n\n#===============================================================================\n\nclass KeystrokesGeneratorLearner(pl.LightningModule):\n\n #---------------------------------------------------------------------------\n def __init__(self, generator, p_teacher=0.5, lr=1e-3):\n super().__init__()\n\n self._p_teacher_forcing = p_teacher\n self._lr = lr\n\n self.generator = generator\n # self.loss = nn.MSELoss()\n self.loss = nn.L1Loss()\n\n #---------------------------------------------------------------------------\n def forward(self, x, y_true=None):\n\n if self.training:\n use_teacher_forcing = (random.random() < self._p_teacher_forcing)\n else:\n use_teacher_forcing = False\n\n batch_size = x.shape[0]\n hidden = self.generator.init_hidden(batch_size)\n\n if use_teacher_forcing:\n x_help = nn.functional.pad(y_true[:, :-1], (0, 0, 1, 0, 0, 0))\n x = torch.cat((x, x_help), axis=2)\n y, _ = self.generator(x, hidden)\n\n else:\n y = [torch.zeros((x.shape[0], 1, 4))]\n\n for i in range(x.shape[1]):\n x_i = x[:, i : i + 1]\n\n if i == 0:\n hide = torch.zeros(batch_size)\n x_i = torch.cat((\n x_i,\n torch.zeros(batch_size, 1, 4)\n ), axis=2)\n\n else:\n hide = (x[:, i-1:i, 0:1] != 0)\n x_i = torch.cat((\n x_i,\n y[-1],\n (y[-1][:, :, 0:1] - y[-2][:, :, 1:2]) * hide,\n (y[-1][:, :, 0:1] + y[-1][:, :, 1:2] - y[-2][:, :, 1:2]) * hide,\n ), axis=2)\n\n y_i, hidden = self.generator(x_i, hidden)\n y.append(y_i)\n\n y = torch.cat(y[1:], axis=1)\n\n return y\n\n #---------------------------------------------------------------------------\n def step(self, batch, batch_idx, validation=False):\n x, _ = batch\n x, y = x[:, :, :-4], x[:, :, -4:]\n\n y_pred = self(x, y)\n loss = self.loss(y_pred, y[:, :, 0:2])\n logs = {\n \"loss\": loss,\n }\n\n if not validation:\n logs.update({\n \"lr\": self.optimizers().param_groups[0][\"lr\"]\n })\n\n return loss, logs\n\n #---------------------------------------------------------------------------\n def training_step(self, batch, batch_idx):\n loss, logs = self.step(batch, batch_idx)\n self.log_dict({f\"train_{k}\": v for k, v in logs.items()}, on_step=True, on_epoch=False)\n return loss\n\n #---------------------------------------------------------------------------\n def validation_step(self, batch, batch_idx):\n loss, logs = self.step(batch, batch_idx, validation=True)\n self.log_dict({f\"test_{k}\": v for k, v in logs.items()})\n return loss\n\n #---------------------------------------------------------------------------\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self._lr)\n return [optimizer]\n # scheduler = {\n # 'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(\n # optimizer,\n # 'min',\n # factor=0.9,\n # patience=25,\n # min_lr=1e-4),\n # 'monitor': 'test_loss'\n # }\n # return [optimizer], [scheduler]\n\n#===============================================================================\n" ]
[ [ "torch.zeros", "torch.nn.L1Loss", "torch.nn.functional.pad", "torch.cat" ] ]
Globe-Eater/Geographic-Duplicate-Detection-
[ "ec467fc41cb456959da87fd913465dc9daa27d80" ]
[ "SVM/mapping.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 3 15:04:11 2019\n\n@author: kellenbullock\n\nGEOGRAPHY part\n\n\nThis file will be for making the \ncoordiantes column, \nzipping latitude and longitude together, \ncreating point geometry, \nreading in shapefile of ok counties,\nsetting coordinate system for both points, and counties,\ndisplaying map.\n\n\"\"\"\n\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point\nimport matplotlib.pyplot as plt\n\n# read in properties\nproperties = pd.read_csv('data.csv')\n\n# reading in county boundries\nok_counties = gpd.read_file('Shapefile_Ok_Counties/tl_2016_40_cousub.shp', crs={'init': 'ESPG:2268'})\n\n# ESPG:4326 WGS 1984\n# ESPG:2268 NAD 1983\n# creating geometry for properties\nproperties['Coordiantes'] = list(zip(properties.Lat, properties.Long))\nproperties['Coordiantes'] = properties['Coordiantes'].apply(Point)\nproperties = gpd.GeoDataFrame(properties, crs={'init':'ESPG:2268'}, geometry='Coordiantes')\nproperties.plot()\n#properties = properties.to_crs(ok_counties.crs)\n\n#properties.plot()\n\n\n# Loading in OK counties shapefile\n#counties = gpd.GeoDataFrame('/Shapefile/COUNTY_BOUNDARY.shp', crs={'init': \"SR-ORG:6703\"})\n\n# Map\nf, ax = plt.subplots(1, figsize=(10,10))\nax = properties.plot(axes=ax, marker='*', markersize=8, legend=(False), color='red')\nok_counties.plot(ax=ax, edgecolor=('black'), color='blue')\nax.set_axis_off()\nf.suptitle('Properties Distribution', fontsize=10)\nplt.show()\n\n\n'''\nThis is from the jupyter notebook using Geopandas to map the obesity points:\n\nf, ax = plt.subplots(1, figsize=(16,12))\nax = data_plot.plot(axes=ax, column='Obesity',edgecolor=('black'), legend=(True))\nparks.plot(ax=ax, marker='o', color='red', markersize=8)\ngyms.plot(ax=ax, marker='o', color='Orange', markersize=8)\nax.set_axis_off()\nf.suptitle('Map 3',fontsize=20)\nlims = plt.axis('equal')\nplt.show()\n\n'''\n\n\n'''\n# I think I can largely ignore this:\n\ndef points(df, Lat, Long):\n \n This code is an example from another project to convert sparate lat longs to geometry\n from shapely.geometry import Point\n \n properites = pd.read_excel('Gym_Points.xlsx')\n gyms['Coordinates'] = list(zip(gyms.Long, gyms.Lat))\n gyms['Coordinates'] = gyms['Coordinates'].apply(Point)\n gyms = GeoDataFrame(gyms, crs={'init': 'epsg:4269'}, geometry='Coordinates')\n type(gyms)\n gyms.head()\n \n \n df['Coordinates'] = list(zip(df.Long, df.Lat))\n df['Coordinates'] = df['Coordinates'].apply(Point)\n Properties = GeoDataFrame(df, crs={'init': 'epsg:4269'}, geometry='Coordinates')\n\n def plot():\n df = GeoDataFrame(df, crs={'init': 'espg:4269'}, geometry = 'Coordinates')\n return \n'''" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
sourabbapusridhar/object-detection-and-tracking-with-multiple-cameras
[ "4674ec5129125e7e329d4638edf4e0518e592525" ]
[ "tracking/opticalflow.py" ]
[ "# Optical Flow\n\n# RUN object_tracker.py\n# conda activate yolov4-cpu\n# python3 object_tracker.py --weights ./checkpoints/yolov4-tiny-416 --model yolov4 --video ./data/video/test.mp4 --output ./outputs/tiny.avi --tiny\n\nimport cv2\nimport numpy as np\n\n\"\"\"Solve optical flow problem\n\n Parameters \n ----------\n frame : int \n Current frame number.\n id_b : int\n Current ID number of detected object.\n BBOX : List[top left x, top left y, width, height ]\n A list of current bounding box coordinates in tlwh (top left width height) format\n class_name : str\n A string of current class name\n\n Returns\n -------\n state = List(int, int, int, np.array([int, int]), np.array([int, int]), np.array([int, int]))\n state = List(frame, id, camera ID, previous BBOX center, current BBOX center, delta[x,y])\n Returns a list with the following six entries:\n * frame\n * id\n * camera ID: -1\n * previous BBOX center: x_last and y_last center position\n * current BBOX center: x_curr and y_curr center position\n * delta: dx and dy, change in position between detections, [pixel/frame]\n \"\"\" \n\n \nclass OpticalFlow:\n \n def __init__(self, frame, id_b, BBOX, class_name):\n self.frame = 0\n self.last_frame = 0\n self.id_b = id_b\n self.last_BBOX = BBOX\n self.this_BBOX = BBOX\n self.class_name = class_name\n self.opt_dict = {} \n\n def __call__(self, frame, id_b, BBOX, class_name): \n \n ### REMOVE LATER ####\n PATH_DICT = './outputs/dict_test.txt'\n PATH_OUT = './outputs/optical_test.txt'\n\n # Fill dictonary\n if str(id_b) not in self.opt_dict: # If no stored info about current ID, add info.\n # Add first ID found\n self.opt_dict[str(id_b)] = [id_b, frame, frame, BBOX, BBOX, class_name]\n # Can not make calculations here since no info about ID is stored\n\n #print('Found new ID and added to dictionary')\n else:\n #print('Existing ID in dict')\n\n # Extract info from dictonary for ID\n ID, f_last, f_curr, bbox_last, bbox_curr, class_name = self.opt_dict[str(id_b)]\n\n # Update parameters \n self.id_b = ID # update ID\n self.last_frame = f_curr # Update last frame to the previous current frame\n self.frame = frame # Update current frame to input frame\n self.last_BBOX = bbox_curr # Update last bbox to the previous current bbox\n self.this_BBOX = BBOX # Update current bbox to input bbox\n self.class_name = class_name # Update class_name\n\n # Add new info to dictonary\n self.opt_dict[str(self.id_b)] = [self.id_b, self.last_frame, self.frame, self.last_BBOX, self.this_BBOX , self.class_name ] \n\n with open(PATH_DICT, 'a') as fs:\n fs.write(str(self.opt_dict[str(self.id_b)]) + \"\\n\")\n\n\n #### COMPUTATIONS ####\n\n # Center point for last box\n last_box_center_x = self.last_BBOX[0] + self.last_BBOX[2]/2\n last_box_center_y = self.last_BBOX[1] - self.last_BBOX[3]/2\n last_box_center = np.array([last_box_center_x,last_box_center_y])\n\n # Center point for this box\n this_box_center_x = self.this_BBOX[0] + self.this_BBOX[2]/2\n this_box_center_y = self.this_BBOX[1] - self.this_BBOX[3]/2\n this_box_center = np.array([this_box_center_x,this_box_center_y])\n \n # Frame delta\n frame_delta = self.frame - self.last_frame\n\n # Box delta in pixels/frame, vector of delta x and delta y \n if frame_delta == 0:\n box_delta_xy = (this_box_center - last_box_center)\n else: \n box_delta_xy = (this_box_center - last_box_center)/frame_delta\n\n state = [self.frame, self.id_b, -1, self.class_name, np.array([last_box_center_x, last_box_center_y]), np.array([this_box_center_x, this_box_center_y]), box_delta_xy]\n # 
state = [frame, id, camera ID = -1, class, np.array(prev_x prev_y), np.array(x, y), np.array(dx, dy)]\n\n ############REMOVE LATER ###########\n with open(PATH_OUT, 'a') as fs:\n fs.write(str(state) + \"\\n\")\n ####################################\n\n return state\n\n" ]
[ [ "numpy.array" ] ]
notaJiminLee/pycoral
[ "d04eabadb69b57899c429d808633969444985ff2" ]
[ "benchmarks/inference_benchmarks.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Benchmark of models.\n\nBenchmark are measured with CPU 'performance' mode. To enable it, you need to\ninstall 'cpupower' and run:\nsudo cpupower frequency-set --governor performance\n\nThe reference number is measured on:\n - 'x86_64': Intel Xeon E5-1650 v3(3.50GHz) + Edge TPU accelarator + USB 3.0\n - 'rp3b': Raspberry Pi 3 B (version1.2)+ Edge TPU accelarator + USB 2.0\n - 'rp3b+': Raspberry Pi 3 B+ (version1.3)+ Edge TPU accelarator + USB 2.0\n - 'aarch64': Edge TPU dev board.\n\"\"\"\n\nimport time\nimport timeit\n\nimport numpy as np\n\nfrom benchmarks import test_utils\nfrom pycoral.utils.edgetpu import make_interpreter\n\n\ndef run_benchmark(model):\n \"\"\"Returns average inference time in ms on specified model on random input.\"\"\"\n\n print('Benchmark for [%s]' % model)\n print('model path = %s' % test_utils.test_data_path(model))\n interpreter = make_interpreter(test_utils.test_data_path(model))\n interpreter.allocate_tensors()\n iterations = 200 if 'edgetpu' in model else 20\n\n input_tensor = interpreter.tensor(interpreter.get_input_details()[0]['index'])\n np.random.seed(12345)\n input_tensor()[0] = np.random.randint(\n 0, 256, size=input_tensor().shape[1:], dtype=np.uint8)\n result = 1000 * timeit.timeit(\n interpreter.invoke, number=iterations) / iterations\n\n print('%.2f ms (iterations = %d)' % (result, iterations))\n return result\n\n\ndef main():\n args = test_utils.parse_args()\n machine = test_utils.machine_info()\n test_utils.check_cpu_scaling_governor_status()\n models, reference = test_utils.read_reference(\n 'inference_reference_%s.csv' % machine)\n\n results = [('MODEL', 'INFERENCE_TIME')]\n for i, model in enumerate(models, start=1):\n print('-------------- Model %d / %d ---------------' % (i, len(models)))\n results.append((model, run_benchmark(model)))\n test_utils.save_as_csv(\n 'inference_benchmarks_%s_%s.csv' %\n (machine, time.strftime('%Y%m%d-%H%M%S')), results)\n test_utils.check_result(reference, results, args.enable_assertion)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.seed" ] ]
adamjstewart/evolution
[ "ba202fd857e9883b8b374ac263f7154a2e58fec0" ]
[ "evolution.py" ]
[ "#!/usr/bin/env python\n\n# Hello World program written using evolutionary computation\n\n# Requires Python 2.7+ and NumPy\n\nimport numpy\nimport random\nimport string\nimport sys\nimport time\n\n\nPOP_SIZE = 1000\nEND_GOAL = list('Hello, World!')\nRATE_OF_INSERTION = 0.1\nRATE_OF_DELETION = 0.1\nRATE_OF_SUBSTITUTION = 0.1\nRATE_OF_DUPLICATION = 0.1\n\n\ndef mutate(sequence):\n '''Returns: a random mutation of sequence.\n\n Preconditions: sequence must be a list.'''\n\n assert(isinstance(sequence, list))\n\n sequence = insertion(sequence)\n sequence = deletion(sequence)\n sequence = substitution(sequence)\n sequence = duplication(sequence)\n\n return sequence\n\n\ndef insertion(sequence):\n '''Returns: sequence with random characters added.\n\n Preconditions: sequence must be a list.'''\n\n assert(isinstance(sequence, list))\n\n i = 0\n while i <= len(sequence):\n if random.random() < RATE_OF_INSERTION:\n newChar = random.choice(string.printable)\n sequence.insert(i, newChar)\n i += 1\n i += 1\n\n return sequence\n\n\ndef deletion(sequence):\n '''Returns: sequence with random characters removed.\n\n Preconditions: sequence must be a list.'''\n\n assert(isinstance(sequence, list))\n\n i = 0\n while i < len(sequence):\n if random.random() < RATE_OF_DELETION:\n sequence.pop(i)\n else:\n i += 1\n\n return sequence\n\n\ndef substitution(sequence):\n '''Returns: sequence with random characters substituted.\n\n Preconditions: sequence must be a list.'''\n\n assert(isinstance(sequence, list))\n\n for i in range(len(sequence)):\n if random.random() < RATE_OF_SUBSTITUTION:\n sequence[i] = random.choice(string.printable)\n\n return sequence\n\n\ndef duplication(sequence):\n '''Returns: sequence with random characters duplicated.\n\n Preconditions: sequence must be a list.'''\n\n assert(isinstance(sequence, list))\n\n i = 0\n while i < len(sequence):\n if random.random() < RATE_OF_DUPLICATION:\n sequence.insert(i, sequence[i])\n i += 1\n i += 1\n\n return sequence\n\n\ndef fitness(source, target):\n '''Returns: the evolutionary fitness of source.\n\n Calculates the Levenshtein Distance between source and target.\n\n Algorithm and implementation are modified from:\n http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance\n\n Preconditions: source and target must be lists.'''\n\n assert(isinstance(source, list))\n assert(isinstance(target, list))\n\n if len(source) < len(target):\n return fitness(target, source)\n\n # Now len(source) >= len(target)\n if len(target) == 0:\n return len(source)\n\n # Cast lists to arrays\n source = numpy.asarray(source)\n target = numpy.asarray(target)\n\n # Use dynamic programming algorithm, but with the added optimization\n # that we only need the last two rows of the matrix.\n previousRow = numpy.arange(target.size + 1)\n for s in source:\n # Insertion (target grows longer than source):\n currentRow = previousRow + 1\n\n # Substitution or matching:\n # Target and source items are aligned, and either\n # are different (cost of 1), or are the same (cost of 0).\n currentRow[1:] = numpy.minimum(\n currentRow[1:],\n numpy.add(previousRow[:-1], target != s))\n\n # Deletion (target grows shorter than source):\n currentRow[1:] = numpy.minimum(\n currentRow[1:],\n currentRow[:-1] + 1)\n\n previousRow = currentRow\n\n return previousRow[-1]\n\n\nif __name__ == '__main__':\n\n # Retrieve input string\n sequence = list(' '.join(sys.argv[1:]))\n\n # Warning: there is no guarantee that an evolutionary program will converge\n while sequence != END_GOAL:\n 
score = fitness(sequence, END_GOAL)\n print('Score: {:2d}, String: {}'.format(score, ''.join(sequence)))\n\n # Reproduction\n generation = []\n for i in range(POP_SIZE):\n generation.append(sequence[:])\n\n generation = list(map(mutate, generation))\n\n # Find most fit\n for newSequence in generation:\n if fitness(newSequence, END_GOAL) <= fitness(sequence, END_GOAL):\n sequence = newSequence\n\n score = fitness(sequence, END_GOAL)\n print('Score: {:2d}, String: {}'.format(score, ''.join(sequence)))\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.minimum", "numpy.add" ] ]
SCAII/SCAII
[ "7ab302f788556392850d104d3df6e0b4a556414d" ]
[ "backends/sky-rts/demos/agent_layers_multi.py" ]
[ "from scaii.env.sky_rts.env.scenarios.tower_example import TowerExample\nfrom scaii.env.explanation import Explanation, BarChart, BarGroup, Bar\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlabels = [\"Health\", \"Agent Location\",\n \"Small Towers\", \"Big Towers\", \"Friend\", \"Enemy\"]\n\n\ndef invert_actions(env):\n out = dict([])\n for k, v in env.actions()['actions'].items():\n out[v] = k\n\n return out\n\n\nenv = TowerExample(map_name=\"multi_step\")\nprint(\"Possible reward types:\", env.reward_types())\nprint(\"Possible actions:\", env.actions())\nprint(\"Action description\", env.action_desc())\nactions = invert_actions(env)\n\ns = env.reset(record=True)\n\nreward = 0\ntyped_reward = dict([])\nnum_steps = 0\nwhile not s.is_terminal():\n\n side = 3\n f = plt.figure(figsize=[6 * side, side])\n for i in range(6):\n plt.subplot(1, 6, 1 + i)\n plt.title(labels[i])\n plt.imshow(s.state[..., i], cmap='gray')\n plt.show()\n\n print(num_steps)\n print(reward)\n num_steps += 1\n\n print(\"acting\")\n act = env.new_action()\n\n explanation = Explanation(\n \"Fake Random Saliency Info\", layer_shape=(40, 40))\n chart = BarChart(\"Move Explanation\", \"Actions\", \"QVal By Reward Type\")\n layer_names = [\"HP\", \"Type 1\", \"Type 2\", \"Type 3\", \"Friend\", \"Enemy\"]\n\n max_quad = 0\n max_value = -np.inf\n for quad in range(1, 5):\n layers = np.random.random((40, 40, 6))\n key = \"BarGroup{}\".format(quad)\n group = BarGroup(\"Attack {}\".format(quad), saliency_key=key)\n explanation.add_layers(layer_names, layers, key=key)\n\n value = 0.0\n for r_type in env.reward_types():\n b_layers = np.random.random((40, 40, 6))\n key = \"BarGroup{}Bar{}\".format(quad, r_type)\n\n bar_val = np.random.rand()\n value += bar_val\n bar = Bar(r_type, bar_val, saliency_key=key)\n\n group.add_bar(bar)\n explanation.add_layers(layer_names, b_layers, key=key)\n\n chart.add_bar_group(group)\n\n if value > max_value:\n max_quad = quad\n max_value = value\n\n explanation.with_bar_chart(chart)\n\n act.attack_quadrant(max_quad)\n s = env.act(act, explanation=explanation)\n\n for k, v in s.typed_reward.items():\n if k not in typed_reward:\n typed_reward[k] = v\n else:\n typed_reward[k] += v\n\n reward += s.reward\n\nprint(\"Survived {} steps\".format(num_steps))\nprint(\"Reward is:\", reward, \"Terminal?:\", s.is_terminal())\nprint(\"With types:\", typed_reward)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.random.random", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "numpy.random.rand", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
TheDr1ver/fintulib
[ "8ed45562d1b5452696288d22363691fbe085afd5" ]
[ "fintulib/wrangle/FuzzyMatcher.py" ]
[ "import pandas as pd\nimport numpy as np\nimport dill as pickle\nimport re\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import rand\n\n\ndef cossim_top(A, B, ntop, lower_bound=0):\n try:\n import sparse_dot_topn.sparse_dot_topn as ct\n except ModuleNotFoundError:\n print(\"This module requires the sparse_dot_topn library \\\n accelerated sparse matrix multiplication,which can be found \\\n at https://github.com/ing-bank/sparse_dot_topn.\")\n import sys\n sys.exit(1)\n B = B.tocsr()\n\n M, _ = A.shape\n _, N = B.shape\n\n idx_dtype = np.int32\n\n nnz_max = M*ntop\n\n indptr = np.empty(M+1, dtype=idx_dtype)\n indices = np.empty(nnz_max, dtype=idx_dtype)\n data = np.empty(nnz_max, dtype=A.dtype)\n\n ct.sparse_dot_topn(\n M, N, np.asarray(A.indptr, dtype=idx_dtype),\n np.asarray(A.indices, dtype=idx_dtype),\n A.data,\n np.asarray(B.indptr, dtype=idx_dtype),\n np.asarray(B.indices, dtype=idx_dtype),\n B.data,\n ntop,\n lower_bound,\n indptr, indices, data)\n\n return csr_matrix((data, indices, indptr), shape=(M, N))\n\n\nclass FuzzyMatcher:\n \"\"\"A fuzzy string matcher based on TF-IDF weighted cosine similarity of character n-grams.\n This class uses the accelerated sparse matrix multiplication library from \\\n https://github.com/ing-bank/sparse_dot_topn and cython - please install before use.\"\"\"\n\n def __init__(self, n_grams=3, verbose=True):\n \"\"\"Create a new fuzzy matcher.\n\n :param n_grams: The number of characters to be used in n-grams. See https://en.wikipedia.org/wiki/N-gram for more details.\n :param verbose: If true, the fuzzy matcher prints information messages during execution.\n \"\"\"\n self.verbose = verbose\n def ngrams_extractor(string):\n string = re.sub(r'[,-./]|\\sBD', r'', string)\n ngrams = zip(*[string[i:] for i in range(n_grams)])\n return [''.join(ngram) for ngram in ngrams]\n\n self.vectorizer = TfidfVectorizer(analyzer=ngrams_extractor, norm=\"l2\")\n self.n_grams_n = n_grams\n self.is_fitted = False\n self.corpus = None\n self.corpus_vect = None\n self.attributes = [\"vectorizer\", \"n_grams_n\",\n \"is_fitted\", \"corpus\", \"corpus_vect\"]\n\n def save_state(self, filename):\n \"\"\"Save the current state of this matcher (including vocabulary and corpus)\n to a file.\n\n :params filename: The path/name of the file to save to.\n \"\"\"\n state = {}\n for a in self.attributes:\n state[a] = getattr(self, a)\n pickle.dump(state, open(filename, \"wb\"))\n\n def load_state(self, filename):\n \"\"\"Read the state of this matcher (including vocabulary and corpus)\n from a previously saved file.\n\n :params filename: The path/name of the file to read from to.\n \"\"\"\n state = pickle.load(open(filename, \"rb\"))\n for a in self.attributes:\n setattr(self, a, state[a])\n\n def fit(self, strings):\n \"\"\"Fit a set of raw strings - this set is used to calculate inverse document frequency (IDF)\n of n-grams.\n\n :param strings: An iterable of raw strings on which the inverse document frequency of n-grams\n for the matcher is calculated.\n \"\"\"\n lc_strings = self._to_lowercase(strings)\n self.vectorizer.fit(lc_strings)\n self.is_fitted = True\n\n def add_corpus(self, corpus):\n \"\"\"Vectorize a corpus of strings to match against. 
If `fit()` hasn't been called yet,\n the inverse document frequency is calculated from the corpus.\n\n :param corpus: An interable of strings that will be used as the corpus for `match_against_corpus()`.\n \"\"\"\n lc_corpus = self._to_lowercase(corpus)\n if self.is_fitted == False:\n if self.verbose:\n print(\"Fitting vectorizer to corpus...\")\n corpus_vect = self.vectorizer.fit_transform(lc_corpus).transpose()\n else:\n corpus_vect = self.vectorizer.transform(lc_corpus).transpose()\n self.corpus = corpus\n self.corpus_vect = corpus_vect\n self.is_fitted = True\n\n def match_against_corpus(self, strings, top_n=5, threshold=0):\n \"\"\"Match a set of strings against the corpus, returning the top n results above the threshold.\n\n :param strings: The strings to match against the corpus.\n :param top_n: The number of potential matches to return for each string.\n :param threshold: Minimum score - all potential matches below this score will be discarded.\n \"\"\"\n if self.corpus_vect == None:\n raise Exception(\"Must add a corpus before matching against it.\")\n\n lc_strings = self._to_lowercase(strings)\n strings_vect = self.vectorizer.transform(lc_strings)\n\n return self._match_feature_matrices(strings, self.corpus, strings_vect, self.corpus_vect, top_n, threshold)\n\n def match_sets(self, left_strings, right_strings, top_n=5, threshold=0):\n \"\"\"Match the set `left_strings` against the set `right_strings`,\n returning for each string in `left_strings` the `top_n` matches in `right_strings`\n with score greater than `threshold`.\n\n :param left_strings: The strings to match against `right_strings`.\n :param right_strings: The strings that `left_strings` will be matched against.\n :param top_n: The number of potential matches to return for each string.\n :param threshold: Minimum score - all potential matches below this score will be discarded.\n \"\"\"\n if self.is_fitted == False:\n raise Exception(\n \"Must fit a dictionary for IDF-weights before matching. Use `.fit()`.\")\n\n lc_left_strings = self._to_lowercase(left_strings)\n lc_right_strings = self._to_lowercase(right_strings)\n\n left_vect = self.vectorizer.transform(lc_left_strings)\n right_vect = self.vectorizer.transform(lc_right_strings).transpose()\n\n return self._match_feature_matrices(left_strings, right_strings, left_vect, right_vect, top_n, threshold)\n\n def score_pair(self, left_string, right_string):\n \"\"\"Calculate the matching score for two strings.\n\n :param left_string: The string to match against `right_string`.\n :param right_string: The string to match against `left_string`.\n \"\"\"\n if self.is_fitted == False:\n raise Exception(\n \"Must fit a dictionary for IDF-weights before matching. 
Use `.fit()`.\")\n\n lc_left_string = left_string.lower()\n lc_right_string = right_string.lower()\n\n left_vect = self.vectorizer.transform([lc_left_string])\n right_vect = self.vectorizer.transform([lc_right_string]).transpose()\n\n return np.multiply(left_vect, right_vect)[0,0]\n\n def _to_lowercase(self, strings):\n return [s.lower() for s in strings]\n\n def _match_feature_matrices(self, left_strings, right_strings, left_vect, right_vect, top_n, threshold):\n if self.verbose:\n print(\"Calculating cosine similarities...this might take a while...\")\n c = cossim_top(left_vect, right_vect, top_n, threshold)\n\n if self.verbose:\n print(\"Formatting results...\")\n\n non_zeros = c.nonzero()\n\n sparserows = non_zeros[0]\n sparsecols = non_zeros[1]\n\n nr_matches = sparsecols.size\n\n lookup_indices = []\n lookups = []\n results = []\n\n lookup_index = sparserows[0]\n lookup = left_strings[sparserows[0]]\n entry = (c.data[0], sparsecols[0], right_strings[sparsecols[0]])\n result = [entry]\n\n for index in range(1, nr_matches):\n if sparserows[index] != sparserows[index-1]:\n # new lookup entry - save the previous result list\n # and create a new result list\n lookup_indices.append(lookup_index)\n lookups.append(lookup)\n results.append(result)\n lookup_index = sparserows[index]\n lookup = left_strings[sparserows[index]]\n result = []\n entry = (c.data[index], sparsecols[index],\n right_strings[sparsecols[index]])\n result.append(entry)\n\n lookup_indices.append(lookup_index)\n lookups.append(lookup)\n results.append(result)\n\n # cossim_top only returns indices for which the matching has found a result --\n # we manually add failed lookups so the output matches the input\n failed_lookup_indices = [item for item in range(len(left_strings)) if item not in lookup_indices]\n failed_lookups = [left_strings[i] for i in failed_lookup_indices]\n failed_results = [[] for i in failed_lookup_indices]\n\n lookup_indices = lookup_indices + failed_lookup_indices\n lookups = lookups + failed_lookups\n results = results + failed_results\n\n return pd.DataFrame({\n 'lookup': lookups,\n 'results': results}, index=lookup_indices).sort_index()\n" ]
[ [ "numpy.multiply", "numpy.asarray", "scipy.sparse.csr_matrix", "pandas.DataFrame", "sklearn.feature_extraction.text.TfidfVectorizer", "numpy.empty" ] ]
rnpatien/O-CNN
[ "d29647768d9dd05ebdabafb20db418008d397098" ]
[ "tensorflow/libs/build.py" ]
[ "import os\nimport sys\nimport tensorflow as tf\nimport subprocess\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--octree\", type=str, required=False,\n default='../../octree')\nparser.add_argument(\"--cuda\", type=str, required=False,\n default='/usr/local/cuda-10.1')\nparser.add_argument('--key64', type=str, required=False,\n default='false')\nparser.add_argument('--cc', type=str, required=False,\n default='g++')\n\nargs = parser.parse_args()\nOCTREE_DIR = args.octree\nCUDA_DIR = args.cuda\nCC = args.cc\nKEY64 = '-DKEY64' if args.key64.lower() == 'true' else ''\nLIBS_DIR = os.path.dirname(os.path.realpath(__file__))\n\nTF_CFLAGS = \" \".join(tf.sysconfig.get_compile_flags())\nTF_LFLAGS = \" \".join(tf.sysconfig.get_link_flags())\n\n## g++-4.8\nenv_gcc = 'echo using g++ '\nif CC == 'g++-4.8':\n cmds = [\n 'sudo apt-get update',\n 'sudo apt-get install gcc-4.8 --yes',\n 'sudo apt-get install g++-4.8 --yes',]\n cmd = ' && '.join(cmds)\n print(cmd)\n os.system(cmd)\n env_gcc = 'echo using g++-4.8 && export CC=gcc-4.8 && export CXX=g++-4.8 '\n\n\n## build octree\noctree_ext = os.path.join(OCTREE_DIR, 'external/octree-ext')\nif not os.path.exists(octree_ext):\n cmd = 'git clone --recursive https://github.com/wang-ps/octree-ext.git ' + octree_ext\n print(cmd)\n os.system(cmd)\n\noctree_build = os.path.join(OCTREE_DIR, 'build')\nif os.path.exists(octree_build):\n os.system('rm -r %s' % octree_build)\n\nos.makedirs(octree_build)\nos.chdir(octree_build)\nabi = 'OFF' if '-D_GLIBCXX_USE_CXX11_ABI=1' in TF_CFLAGS else 'ON'\nk64 = 'OFF' if KEY64 != '-DKEY64' else 'ON'\ncmd = env_gcc + '&& cmake .. -DABI=%s -DKEY64=%s && make -j all' % (abi, k64)\nprint(cmd)\nos.system(cmd)\nos.system('./octree_test') # run the test\nos.chdir(LIBS_DIR)\n\n\n## build ocnn-tf\nlines = []\nlines.append(\"TF_CFLAGS := %s\" % TF_CFLAGS)\nlines.append(\"TF_LFLAGS := %s\" % TF_LFLAGS)\nlines.append(\"OCT_CFLAGS := -I%s/octree\" % OCTREE_DIR)\nlines.append(\"OCT_LFLAGS := -L%s/build -loctree_lib\" % OCTREE_DIR)\nlines.append(\"\")\n\nlines.append(\"NVCC_FLAGS1 := -std=c++11 -O2 -c\")\nlines.append(\"NVCC_FLAGS2 := $(TF_CFLAGS) $(OCT_CFLAGS) -I %s/include \" \\\n \"-x cu -Xcompiler -fPIC -D GOOGLE_CUDA=1 -I /usr/local \" \\\n \"-expt-relaxed-constexpr -DNDEBUG %s\" % (CUDA_DIR, KEY64))\nlines.append(\"CC_FLAGS1 := -std=c++11 -O2\")\nlines.append(\"CC_FLAGS2 := $(TF_CFLAGS) $(OCT_CFLAGS) -I %s/include \" \\\n \"-L %s/lib64 -D GOOGLE_CUDA=1 $(TF_LFLAGS) $(OCT_LFLAGS) \" \\\n \"-fPIC -lcudart %s\" % (CUDA_DIR, CUDA_DIR, KEY64))\nlines.append(\"\")\n\nlines.append(\"CC := %s\" % CC)\nlines.append(\"NVCC := %s/bin/nvcc\" % CUDA_DIR)\nlines.append(\"\")\n\ncpu_objects = []\ncpu_sources = []\ngpu_objects = []\ngpu_sources = []\nall_headers = []\nfor filename in sorted(os.listdir(\".\")):\n if filename.endswith(\".cu.cc\") or filename.endswith(\".cu\"):\n targetname = filename + \".o\"\n gpu_sources.append(filename)\n gpu_objects.append(os.path.join(\"object\", targetname))\n elif filename.endswith(\".cc\") or filename.endswith(\".cpp\"):\n targetname = filename + \".o\"\n cpu_sources.append(filename)\n cpu_objects.append(os.path.join(\"object\", targetname))\n elif filename.endswith(\".h\"):\n all_headers.append(filename)\n\nlines.append(\"cpu_objects := %s\" % \" \".join(cpu_objects))\nlines.append(\"gpu_objects := %s\" % \" \".join(gpu_objects))\nlines.append(\"\")\n\nlines.append(\".PHONY : all clean\")\nlines.append(\"\")\n\nlines.append(\"all : 
libocnn.so\")\nlines.append(\"\")\n\nlines.append(\"libocnn.so : $(cpu_objects) $(gpu_objects)\")\nlines.append(\"\\t$(CC) $(CC_FLAGS1) -shared -o libocnn.so object/*.o $(CC_FLAGS2)\")\nlines.append(\"\")\n\n\nfor i in range(len(cpu_objects)):\n dependency = b\" \".join(\n subprocess.Popen(\"%s -std=c++11 -MM -MG %s\" % (CC, cpu_sources[i]),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True).stdout.readlines()).decode(\"utf-8\")\n headers = []\n for h in all_headers:\n if dependency.find(h) > 0:\n headers.append(h)\n headers = \" \".join(headers)\n lines.append(\"%s : %s %s\" % (cpu_objects[i], headers, cpu_sources[i]))\n lines.append(\"\\t$(CC) $(CC_FLAGS1) -c %s $(CC_FLAGS2) -o %s\" % \\\n (cpu_sources[i], cpu_objects[i]))\n lines.append(\"\")\n\nfor i in range(len(gpu_objects)):\n dependency = b\" \".join(\n subprocess.Popen(\"%s -std=c++11 -MM -MG %s\" % (CC, gpu_sources[i]),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True).stdout.readlines()).decode(\"utf-8\")\n headers = []\n for h in all_headers:\n if dependency.find(h) > 0:\n headers.append(h)\n headers = \" \".join(headers)\n lines.append(\"%s : %s %s\" % (gpu_objects[i], headers, gpu_sources[i]))\n lines.append(\"\\t$(NVCC) $(NVCC_FLAGS1) %s $(NVCC_FLAGS2) -o %s\" % \\\n (gpu_sources[i], gpu_objects[i]))\n lines.append(\"\")\n\nlines.append(\"clean :\")\nlines.append(\"\\t rm %s\" % os.path.join(\"object\", \"*.o\"))\nlines.append(\"\")\n\nlines = [line + \"\\n\" for line in lines]\nwith open(\"Makefile\", \"w\") as f:\n f.writelines(lines)\n\n# make\nif os.path.exists(\"object\"):\n os.system(\"rm -r object\")\n os.mkdir(\"object\")\nos.system(\"make -j all\")\n\n# test\nos.chdir(LIBS_DIR + \"/../test\")\n# env variable\ncmd = 'export OCTREE_KEY=' + ('64' if KEY64 == '-DKEY64' else '32')\nos.system(cmd + \" && python test_all.py\")" ]
[ [ "tensorflow.sysconfig.get_compile_flags", "tensorflow.sysconfig.get_link_flags" ] ]
gomerudo/openai-baselines
[ "42adc549f3a7b3241227dfa2ebb1188784c4da06" ]
[ "baselines/common/policies.py" ]
[ "import tensorflow as tf\nfrom baselines.common import tf_util\nfrom baselines.meta_a2c.utils import fc\nfrom baselines.common.distributions import make_pdtype\nfrom baselines.common.input import observation_placeholder, encode_observation\nfrom baselines.common.tf_util import adjust_shape\nfrom baselines.common.mpi_running_mean_std import RunningMeanStd\nfrom baselines.common.models import get_network_builder\n\nimport gym\n\n\nclass PolicyWithValue(object):\n \"\"\"\n Encapsulates fields and methods for RL policy and value function estimation with shared parameters\n \"\"\"\n\n def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, is_meta=False, p_action=None, p_reward=None, timestep=None,**tensors):\n \"\"\"\n Parameters:\n ----------\n env RL environment\n\n observations tensorflow placeholder in which the observations will be fed\n\n latent latent state from which policy distribution parameters should be inferred\n\n vf_latent latent state from which value function should be inferred (if None, then latent is used)\n\n sess tensorflow session to run calculations in (if None, default session is used)\n\n **tensors tensorflow tensors for additional attributes such as state or mask\n\n \"\"\"\n\n self.X = observations\n self.is_meta = is_meta\n self.p_action = p_action\n self.p_reward = p_reward\n self.timestep = timestep\n self.state = tf.constant([])\n self.initial_state = None\n self.__dict__.update(tensors)\n\n vf_latent = vf_latent if vf_latent is not None else latent\n\n vf_latent = tf.layers.flatten(vf_latent)\n latent = tf.layers.flatten(latent)\n\n # Based on the action space, will select what probability distribution type\n self.pdtype = make_pdtype(env.action_space)\n\n self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01)\n\n # Take an action\n self.action = self.pd.sample()\n\n # Calculate the neg log of our probability\n self.neglogp = self.pd.neglogp(self.action)\n self.sess = sess or tf.get_default_session()\n\n if estimate_q:\n assert isinstance(env.action_space, gym.spaces.Discrete)\n self.q = fc(vf_latent, 'q', env.action_space.n)\n self.vf = self.q\n else:\n self.vf = fc(vf_latent, 'vf', 1)\n self.vf = self.vf[:, 0]\n\n def _evaluate(self, variables, observation, ob_pa=None, ob_pr=None, ob_t=None, **extra_feed):\n sess = self.sess\n feed_dict = {\n self.X: adjust_shape(self.X, observation)\n }\n \n if self.is_meta:\n meta_dict = {\n self.p_reward: adjust_shape(self.p_reward, ob_pr),\n self.timestep: adjust_shape(self.timestep, ob_t),\n self.p_action: adjust_shape(self.p_action, ob_pa),\n }\n # Update\n feed_dict.update(meta_dict)\n\n for inpt_name, data in extra_feed.items():\n # print(\"Input name\", inpt_name)\n # print(\"Data\", data)\n if inpt_name in self.__dict__.keys():\n inpt = self.__dict__[inpt_name]\n if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder':\n # print(\"Original shape is:\", data.shape)\n feed_dict[inpt] = adjust_shape(inpt, data)\n\n return sess.run(variables, feed_dict)\n\n def step(self, observation, p_action=None, p_reward=None, timestep=None, **extra_feed):\n \"\"\"\n Compute next action(s) given the observation(s)\n\n Parameters:\n ----------\n\n observation observation data (either single or a batch)\n\n **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)\n\n Returns:\n -------\n (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple\n 
\"\"\"\n a, v, state, neglogp = self._evaluate(\n [self.action, self.vf, self.state, self.neglogp],\n observation,\n p_action,\n p_reward,\n timestep,\n **extra_feed\n )\n if state.size == 0:\n state = None\n return a, v, state, neglogp\n\n def value(self, ob, p_action=None, p_reward=None, timestep=None, *args, **kwargs):\n \"\"\"\n Compute value estimate(s) given the observation(s)\n\n Parameters:\n ----------\n\n observation observation data (either single or a batch)\n\n **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)\n\n Returns:\n -------\n value estimate\n \"\"\"\n aux = self._evaluate(self.vf, ob, p_action, p_reward, timestep, *args, **kwargs)\n return aux\n\n def save(self, save_path):\n tf_util.save_state(save_path, sess=self.sess)\n\n def load(self, load_path):\n tf_util.load_state(load_path, sess=self.sess)\n\ndef build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):\n if isinstance(policy_network, str):\n network_type = policy_network\n policy_network = get_network_builder(network_type)(**policy_kwargs)\n\n def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):\n ob_space = env.observation_space\n\n X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)\n\n extra_tensors = {}\n\n if normalize_observations and X.dtype == tf.float32:\n encoded_x, rms = _normalize_clip_observation(X)\n extra_tensors['rms'] = rms\n else:\n encoded_x = X\n\n encoded_x = encode_observation(ob_space, encoded_x)\n\n with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):\n if network_type.startswith(\"meta\"):\n # Placeholders for meta-information\n p_reward = tf.placeholder(shape=[nbatch, 1], dtype=tf.float32, name=\"p_reward\")\n p_action = tf.placeholder(shape=[nbatch], dtype=tf.int32, name=\"p_action\")\n timestep = tf.placeholder(shape=[nbatch, 1], dtype=tf.float32, name=\"timestep\")\n policy_latent = policy_network(encoded_x, p_reward, timestep, p_action, env.action_space.n)\n else:\n policy_latent = policy_network(encoded_x)\n\n if isinstance(policy_latent, tuple):\n policy_latent, recurrent_tensors = policy_latent\n\n if recurrent_tensors is not None:\n # recurrent architecture, need a few more steps\n nenv = nbatch // nsteps\n assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)\n\n # Check for meta-models (Learning to reinforcement learn)\n if network_type.startswith(\"meta\"):\n policy_latent, recurrent_tensors = policy_network(encoded_x, p_reward, timestep, p_action, env.action_space.n, nenv)\n else:\n policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)\n\n extra_tensors.update(recurrent_tensors)\n\n\n _v_net = value_network\n\n if _v_net is None or _v_net == 'shared':\n vf_latent = policy_latent\n else:\n if _v_net == 'copy':\n _v_net = policy_network\n else:\n assert callable(_v_net)\n\n with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):\n # TODO recurrent architectures are not supported with value_network=copy yet\n vf_latent = _v_net(encoded_x)\n\n if network_type.startswith(\"meta\"):\n policy = PolicyWithValue(\n env=env,\n observations=X,\n latent=policy_latent,\n vf_latent=vf_latent,\n sess=sess,\n estimate_q=estimate_q,\n is_meta=True,\n p_action=p_action,\n p_reward=p_reward,\n timestep=timestep,\n **extra_tensors\n )\n else:\n policy = PolicyWithValue(\n env=env,\n observations=X,\n 
latent=policy_latent,\n vf_latent=vf_latent,\n sess=sess,\n estimate_q=estimate_q,\n **extra_tensors\n )\n return policy\n\n return policy_fn\n\n\ndef _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):\n rms = RunningMeanStd(shape=x.shape[1:])\n norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))\n return norm_x, rms\n\n" ]
[ [ "tensorflow.get_default_session", "tensorflow.layers.flatten", "tensorflow.constant", "tensorflow.placeholder", "tensorflow.variable_scope" ] ]
jpmorgen/IoIO
[ "c22523d77bc38162cabf3ddb6d7446e4005864e8" ]
[ "flat_ratio.py" ]
[ "\"\"\"Get on-off ratios for [SII] and Na filters from flats\"\"\"\n\nimport os\nimport glob\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nimport pandas as pd\n\nfrom astropy.stats import mad_std, biweight_location\nfrom astropy.time import Time\n\nfrom cormultipipe import (CALIBRATION_ROOT, RedCorData,\n CorMultiPipe, Calibration)\n\ndef flist_to_dict(flist):\n dlist = []\n for f in flist:\n bname = os.path.basename(f)\n s = bname.split('_')\n d = {'fname': f, 'date': s[0], 'band': s[1], 'onoff': s[2]}\n dlist.append(d)\n return dlist\n \ndef flat_flux(fname_or_ccd):\n ccd = RedCorData.read(fname_or_ccd)\n maxval = ccd.meta['FLATDIV']\n exptime = ccd.meta['EXPTIME']\n flux = maxval/exptime\n return flux\n \n\nc = Calibration(start_date='2020-07-07', stop_date='2020-08-22', reduce=True)\n#c = Calibration(start_date='2020-01-01', stop_date='2020-04-24', reduce=True)\n#c = Calibration(start_date='2020-01-01', stop_date='2020-12-31', reduce=True)\n\non_list = glob.glob(os.path.join(CALIBRATION_ROOT, '*on_flat*'))\noff_list = glob.glob(os.path.join(CALIBRATION_ROOT, '*off_flat*'))\n\non_dlist = flist_to_dict(on_list)\noff_dlist = flist_to_dict(off_list)\n\nratio_dlist = []\nfor on_dict in on_dlist:\n date = on_dict['date']\n tm = Time(date, format='fits')\n band = on_dict['band']\n for off_dict in off_dlist:\n if off_dict['date'] != date:\n continue\n if off_dict['band'] != band:\n continue\n off_flux = flat_flux(off_dict['fname'])\n on_flux = flat_flux(on_dict['fname'])\n ratio = off_flux / on_flux\n ratio_dict = {'band': band,\n 'date': date,\n 'time': tm.tt.datetime,\n 'ratio': ratio}\n ratio_dlist.append(ratio_dict)\n\n#print(ratio_dlist)\ndf = pd.DataFrame(ratio_dlist)\n\nf = plt.figure(figsize=[8.5, 11])\nplt.title('Sky flat ratios')\nfor ib, band in enumerate(['Na', 'SII']):\n plt.title('Sky flat ratios')\n this_band = df[df['band'] == band]\n med_ratio = np.median(this_band['ratio'])\n std_ratio = np.std(this_band['ratio'])\n biweight_ratio = biweight_location(this_band['ratio'])\n mad_std_ratio = mad_std(this_band['ratio'])\n print(f'{band} med {med_ratio:.2f} +/- {std_ratio:.2f}')\n print(f'{band} biweight {biweight_ratio:.2f} +/- {mad_std_ratio:.2f}')\n ax = plt.subplot(2, 1, ib+1)\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)\n #plt.plot(df['time'], df['ratio'], 'k.')\n plt.plot(this_band['time'], this_band['ratio'], 'k.')\n plt.ylabel(f'{band} off/on ratio')\n plt.axhline(y=biweight_ratio, color='red')\n plt.text(0.5, biweight_ratio + 0.1*mad_std_ratio, \n f'{biweight_ratio:.2f} +/- {mad_std_ratio:.2f}',\n ha='center', transform=ax.get_yaxis_transform())\n plt.axhline(y=biweight_ratio+mad_std_ratio,\n linestyle='--', color='k', linewidth=1)\n plt.axhline(y=biweight_ratio-mad_std_ratio,\n linestyle='--', color='k', linewidth=1)\nplt.gcf().autofmt_xdate()\n\n#plt.savefig(ps.path.join(CALIBRATION_ROOT, 'off_on_ratio_vs_time.png'), transparent=True)\nshow= True\nif show:\n plt.show()\nplt.close()\n\n" ]
[ [ "matplotlib.pyplot.axhline", "matplotlib.pyplot.title", "numpy.median", "matplotlib.ticker.AutoMinorLocator", "pandas.DataFrame", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "numpy.std", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
timsainb/spikesorters
[ "0d4ca7718e786720bdf55285d0f0bc3bf9dff9c2" ]
[ "spikesorters/hdsort/hdsort.py" ]
[ "from pathlib import Path\nimport os\nfrom typing import Union\nimport numpy as np\nimport sys\n\nimport spikeextractors as se\nfrom ..basesorter import BaseSorter\nfrom ..utils.shellscript import ShellScript\nfrom ..sorter_tools import recover_recording\n\nPathType = Union[str, Path]\n\n\ndef check_if_installed(hdsort_path: Union[str, None]):\n if hdsort_path is None:\n return False\n assert isinstance(hdsort_path, str)\n\n if hdsort_path.startswith('\"'):\n hdsort_path = hdsort_path[1:-1]\n hdsort_path = str(Path(hdsort_path).absolute())\n if (Path(hdsort_path) / '+hdsort').is_dir():\n return True\n else:\n return False\n\n\nclass HDSortSorter(BaseSorter):\n \"\"\"\n \"\"\"\n\n sorter_name: str = 'hdsort'\n hdsort_path: Union[str, None] = os.getenv('HDSORT_PATH', None)\n requires_locations = False\n _default_params = {\n 'detect_threshold': 4.2,\n 'detect_sign': -1, # -1 - 1\n 'filter': True,\n 'parfor': True,\n 'freq_min': 300,\n 'freq_max': 7000,\n 'max_el_per_group': 9,\n 'min_el_per_group': 1,\n 'add_if_nearer_than': 20,\n 'max_distance_within_group': 52,\n 'n_pc_dims': 6,\n 'chunk_size': 500000,\n 'loop_mode': 'local_parfor',\n 'chunk_mb': 500\n }\n\n _params_description = {\n 'detect_threshold': \"Threshold for spike detection\",\n 'detect_sign': \"Use -1 (negative) or 1 (positive) depending \"\n \"on the sign of the spikes in the recording\",\n 'filter': \"Enable or disable filter\",\n 'parfor': \"If True, the Matlab parfor is used\",\n 'freq_min': \"High-pass filter cutoff frequency\",\n 'freq_max': \"Low-pass filter cutoff frequency\",\n 'max_el_per_group': \"Maximum number of channels per electrode group\",\n 'min_el_per_group': \"Minimum number of channels per electrode group\",\n 'add_if_nearer_than': \"Minimum distance to add electrode to an electrode group\",\n 'max_distance_within_group': \"Maximum distance within an electrode group\",\n 'n_pc_dims': \"Number of principal components dimensions to perform initial clustering\",\n 'chunk_size': \"Chunk size in number of frames for template-matching\",\n 'loop_mode': \"Loop mode: 'loop', 'local_parfor', 'grid' (requires a grid architecture)\",\n 'chunk_mb': \"Chunk size in Mb for saving to binary format (default 500Mb)\",\n }\n\n sorter_description = \"\"\"HDSort is a template-matching spike sorter designed for high density micro-electrode arrays. 
\n For more information see https://doi.org/10.1152/jn.00803.2017\"\"\"\n\n installation_mesg = \"\"\"\\nTo use HDSort run:\\n\n >>> git clone https://git.bsse.ethz.ch/hima_public/HDsort.git\n and provide the installation path by setting the HDSORT_PATH\n environment variables or using HDSortSorter.set_hdsort_path().\\n\\n\n\n More information on HDSort at:\n https://git.bsse.ethz.ch/hima_public/HDsort.git\n \"\"\"\n\n def __init__(self, **kargs):\n BaseSorter.__init__(self, **kargs)\n \n @classmethod\n def is_installed(cls):\n return check_if_installed(cls.hdsort_path)\n \n @staticmethod\n def get_sorter_version():\n p = os.getenv('HDSORT_PATH', None)\n if p is None:\n return 'unknown'\n else:\n with open(str(Path(p) / 'version.txt'), mode='r', encoding='utf8') as f:\n version = f.readline()\n return version\n\n @staticmethod\n def set_hdsort_path(hdsort_path: PathType):\n HDSortSorter.hdsort_path = str(Path(hdsort_path).absolute())\n try:\n print(\"Setting HDSORT_PATH environment variable for subprocess calls to:\", hdsort_path)\n os.environ[\"HDSORT_PATH\"] = hdsort_path\n except Exception as e:\n print(\"Could not set HDSORT_PATH environment variable:\", e)\n\n def _setup_recording(self, recording, output_folder):\n if not self.is_installed():\n raise Exception(HDSortSorter.installation_mesg)\n\n source_dir = Path(__file__).parent\n utils_path = source_dir.parent / 'utils'\n\n if isinstance(recording, se.MaxOneRecordingExtractor):\n self.params['file_name'] = str(Path(recording._file_path).absolute())\n self.params['file_format'] = 'maxone'\n print('Using MaxOne format')\n else:\n file_name = output_folder / 'recording.h5'\n # Generate three files dataset in Mea1k format\n self.write_hdsort_input_format(recording, save_path=str(file_name), chunk_mb=self.params[\"chunk_mb\"])\n self.params['file_format'] = 'mea1k'\n\n p = self.params\n p['sort_name'] = 'hdsort_output'\n\n # read the template txt files\n with (source_dir / 'hdsort_master.m').open('r') as f:\n hdsort_master_txt = f.read()\n with (source_dir / 'hdsort_config.m').open('r') as f:\n hdsort_config_txt = f.read()\n\n # make substitutions in txt files\n hdsort_master_txt = hdsort_master_txt.format(\n hdsort_path=str(\n Path(HDSortSorter.hdsort_path).absolute()),\n utils_path=str(utils_path.absolute()),\n config_path=str((output_folder / 'hdsort_config.m').absolute()),\n file_name=p['file_name'],\n file_format=p['file_format'],\n sort_name=p['sort_name'],\n chunk_size=p['chunk_size'],\n loop_mode=p['loop_mode']\n )\n\n if p['filter']:\n p['filter'] = 1\n else:\n p['filter'] = 0\n\n if p['parfor']:\n p['parfor'] = 'true'\n else:\n p['parfor'] = 'false'\n\n hdsort_config_txt = hdsort_config_txt.format(\n filter=p['filter'],\n parfor=p['parfor'],\n hpf=p['freq_min'],\n lpf=p['freq_max'],\n max_el_per_group=p['max_el_per_group'],\n min_el_per_group=p['min_el_per_group'],\n add_if_nearer_than=p['add_if_nearer_than'],\n max_distance_within_group=p['max_distance_within_group'],\n detect_threshold=p['detect_threshold'],\n n_pc_dims=p['n_pc_dims'],\n )\n\n for fname, txt in zip(['hdsort_master.m', 'hdsort_config.m'],\n [hdsort_master_txt, hdsort_config_txt]):\n with (output_folder / fname).open('w') as f:\n f.write(txt)\n\n def _run(self, recording, output_folder):\n recording = recover_recording(recording)\n tmpdir = output_folder\n tmpdir.mkdir(parents=True, exist_ok=True)\n samplerate = recording.get_sampling_frequency()\n\n if recording.is_filtered and self.params['filter']:\n print(\"Warning! 
The recording is already filtered, but HDsort filter is enabled. You can disable \"\n \"filters by setting 'filter' parameter to False\")\n\n if \"win\" in sys.platform and sys.platform != 'darwin':\n shell_cmd = '''\n {disk_move}\n cd {tmpdir}\n matlab -nosplash -wait -r hdsort_master\n '''.format(disk_move=str(output_folder)[:2], tmpdir=output_folder)\n else:\n shell_cmd = '''\n #!/bin/bash\n cd \"{tmpdir}\"\n matlab -nosplash -nodisplay -r hdsort_master\n '''.format(tmpdir=output_folder)\n\n shell_script = ShellScript(shell_cmd, script_path=output_folder / f'run_{self.sorter_name}',\n log_path=output_folder / f'{self.sorter_name}.log', verbose=self.verbose)\n shell_script.start()\n\n retcode = shell_script.wait()\n\n if retcode != 0:\n raise Exception('HDsort returned a non-zero exit code')\n\n samplerate_fname = str(output_folder / 'samplerate.txt')\n with open(samplerate_fname, 'w') as f:\n f.write('{}'.format(samplerate))\n\n\n @staticmethod\n def get_result_from_folder(output_folder):\n output_folder = Path(output_folder)\n sorting = se.HDSortSortingExtractor(file_path=str(output_folder / 'hdsort_output' /\n 'hdsort_output_results.mat'))\n\n return sorting\n\n def write_hdsort_input_format(self, recording, save_path, chunk_size=None, chunk_mb=500):\n try:\n import h5py\n except:\n raise Exception(\"To use HDSort, install h5py: pip install h5py\")\n\n # check if already in write format\n write_file = True\n if hasattr(recording, '_file_path'):\n if Path(recording._file_path).suffix in ['.h5', '.hdf5']:\n with h5py.File(recording._file_path, 'r') as f:\n keys = f.keys()\n if \"version\" in keys and \"ephys\" in keys and \"mapping\" in keys and \"frame_rate\" in keys \\\n and \"frame_numbers\" in keys:\n if \"sig\" in f[\"ephys\"].keys():\n write_file = False\n self.params['file_name'] = str(Path(recording._file_path).absolute())\n\n if write_file:\n save_path = Path(save_path)\n if save_path.suffix == '':\n save_path = Path(str(save_path) + '.h5')\n mapping_dtype = np.dtype([('electrode', np.int32), ('x', np.float64), ('y', np.float64),\n ('channel', np.int32)])\n\n assert 'location' in recording.get_shared_channel_property_names(), \"'location' property is needed \" \\\n \"to run HDSort\"\n\n with h5py.File(save_path, 'w') as f:\n f.create_group('ephys')\n f.create_dataset('version', data=str(20161003))\n ephys = f['ephys']\n ephys.create_dataset('frame_rate', data=recording.get_sampling_frequency())\n ephys.create_dataset('frame_numbers', data=np.arange(recording.get_num_frames()))\n # save mapping\n mapping = np.empty(recording.get_num_channels(), dtype=mapping_dtype)\n x = recording.get_channel_locations()[:, 0]\n y = recording.get_channel_locations()[:, 1]\n for i, ch in enumerate(recording.get_channel_ids()):\n mapping[i] = (ch, x[i], y[i], ch)\n ephys.create_dataset('mapping', data=mapping)\n # save traces\n recording.write_to_h5_dataset_format('/ephys/signal', file_handle=f, time_axis=1,\n chunk_size=chunk_size, chunk_mb=chunk_mb)\n self.params['file_name'] = str(save_path.absolute())\n\n" ]
[ [ "numpy.dtype" ] ]
douglasdavis/awkward-1.0
[ "f00775803a5568efb0a8e2dae3b1a4f23228fa40", "f00775803a5568efb0a8e2dae3b1a4f23228fa40", "f00775803a5568efb0a8e2dae3b1a4f23228fa40", "f00775803a5568efb0a8e2dae3b1a4f23228fa40" ]
[ "tests/test_0084-start-unionarray.py", "tests/v2/test_1192-iterables-in-__array_function__.py", "tests/v2/test_0979-where-multidimentional-numpy-array.py", "tests/test_0072-fillna-operation.py" ]
[ "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_getitem():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n\n assert np.asarray(ak.layout.UnionArray8_32.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_32.regular_index(tags)).dtype == np.dtype(\n np.int32\n )\n assert np.asarray(ak.layout.UnionArray8_U32.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_U32.regular_index(tags)).dtype == np.dtype(\n np.uint32\n )\n assert np.asarray(ak.layout.UnionArray8_64.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_64.regular_index(tags)).dtype == np.dtype(\n np.int64\n )\n\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n assert np.asarray(array.tags).tolist() == [1, 1, 0, 0, 1, 0, 1, 1]\n assert np.asarray(array.tags).dtype == np.dtype(np.int8)\n assert np.asarray(array.index).tolist() == [0, 1, 0, 1, 2, 2, 4, 3]\n assert np.asarray(array.index).dtype == np.dtype(np.int32)\n assert type(array.contents) is list\n assert [ak.to_list(x) for x in array.contents] == [\n [[1.1, 2.2, 3.3], [], [4.4, 5.5]],\n [\"one\", \"two\", \"three\", \"four\", \"five\"],\n ]\n assert array.numcontents == 2\n assert ak.to_list(array.content(0)) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n assert ak.to_list(array.content(1)) == [\"one\", \"two\", \"three\", \"four\", \"five\"]\n assert ak.to_list(array.project(0)) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n assert ak.to_list(array.project(1)) == [\"one\", \"two\", \"three\", \"five\", \"four\"]\n repr(array)\n\n assert ak.to_list(array[0]) == \"one\"\n assert ak.to_list(array[1]) == \"two\"\n assert ak.to_list(array[2]) == [1.1, 2.2, 3.3]\n assert ak.to_list(array[3]) == []\n assert ak.to_list(array[4]) == \"three\"\n assert ak.to_list(array[5]) == [4.4, 5.5]\n assert ak.to_list(array[6]) == \"five\"\n assert ak.to_list(array[7]) == \"four\"\n\n assert ak.to_list(array) == [\n \"one\",\n \"two\",\n [1.1, 2.2, 3.3],\n [],\n \"three\",\n [4.4, 5.5],\n \"five\",\n \"four\",\n ]\n assert ak.to_list(array[1:-1]) == [\n \"two\",\n [1.1, 2.2, 3.3],\n [],\n \"three\",\n [4.4, 5.5],\n \"five\",\n ]\n assert ak.to_list(array[2:-2]) == [[1.1, 2.2, 3.3], [], \"three\", [4.4, 5.5]]\n assert ak.to_list(array[::2]) == [\"one\", [1.1, 2.2, 3.3], \"three\", \"five\"]\n assert ak.to_list(array[::2, 1:]) == [\"ne\", [2.2, 3.3], \"hree\", \"ive\"]\n assert ak.to_list(array[:, :-1]) == [\n \"on\",\n \"tw\",\n [1.1, 2.2],\n [],\n \"thre\",\n [4.4],\n \"fiv\",\n \"fou\",\n ]\n\n content2 = ak.from_iter(\n [{\"x\": 0, \"y\": []}, {\"x\": 1, \"y\": [1.1]}, {\"x\": 2, \"y\": [1.1, 2.2]}],\n highlevel=False,\n )\n content3 = ak.from_iter(\n [\n {\"x\": 0.0, \"y\": \"zero\", \"z\": False},\n {\"x\": 1.1, \"y\": \"one\", \"z\": True},\n {\"x\": 2.2, \"y\": \"two\", \"z\": False},\n {\"x\": 3.3, \"y\": \"three\", \"z\": True},\n {\"x\": 4.4, \"y\": \"four\", \"z\": False},\n ],\n highlevel=False,\n )\n array2 = ak.layout.UnionArray8_32(tags, index, 
[content2, content3])\n assert ak.to_list(array2) == [\n {\"x\": 0.0, \"y\": \"zero\", \"z\": False},\n {\"x\": 1.1, \"y\": \"one\", \"z\": True},\n {\"x\": 0, \"y\": []},\n {\"x\": 1, \"y\": [1.1]},\n {\"x\": 2.2, \"y\": \"two\", \"z\": False},\n {\"x\": 2, \"y\": [1.1, 2.2]},\n {\"x\": 4.4, \"y\": \"four\", \"z\": False},\n {\"x\": 3.3, \"y\": \"three\", \"z\": True},\n ]\n assert ak.to_list(array2[\"x\"]) == [0.0, 1.1, 0, 1, 2.2, 2, 4.4, 3.3]\n assert ak.to_list(array2[\"y\"]) == [\n \"zero\",\n \"one\",\n [],\n [1.1],\n \"two\",\n [1.1, 2.2],\n \"four\",\n \"three\",\n ]\n assert ak.to_list(array2[:, \"y\", 1:]) == [\n \"ero\",\n \"ne\",\n [],\n [],\n \"wo\",\n [2.2],\n \"our\",\n \"hree\",\n ]\n assert ak.to_list(array2[\"y\", :, 1:]) == [\n \"ero\",\n \"ne\",\n [],\n [],\n \"wo\",\n [2.2],\n \"our\",\n \"hree\",\n ]\n with pytest.raises(ValueError) as err:\n array2[:, 1:, \"y\"]\n assert str(err.value).startswith(\"in NumpyArray, too many dimensions in slice\")\n with pytest.raises(ValueError) as err:\n array2[\"z\"]\n assert str(err.value).startswith('key \"z\" does not exist (not in record)')\n\n array3 = ak.layout.UnionArray8_32(tags, index, [content3, content2])\n array4 = ak.layout.UnionArray8_32(\n tags, index, [content0, content1, content2, content3]\n )\n assert set(content2.keys()) == {\"x\", \"y\"}\n assert set(content3.keys()) == {\"x\", \"y\", \"z\"}\n assert set(array2.keys()) == {\"x\", \"y\"}\n assert set(array3.keys()) == {\"x\", \"y\"}\n assert array4.keys() == []\n\n\ndef test_identities():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n\n array.setidentities()\n assert np.asarray(array.identities).tolist() == [\n [0],\n [1],\n [2],\n [3],\n [4],\n [5],\n [6],\n [7],\n ]\n assert np.asarray(array.content(0).identities).tolist() == [[2], [3], [5]]\n assert np.asarray(array.content(1).identities).tolist() == [[0], [1], [4], [7], [6]]\n\n\ndef test_fromiter():\n builder = ak.layout.ArrayBuilder()\n\n builder.integer(0)\n builder.integer(1)\n builder.integer(2)\n builder.beginlist()\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.real(2.2)\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.real(2.2)\n builder.real(3.3)\n builder.endlist()\n\n assert ak.to_list(builder.snapshot()) == [\n 0,\n 1,\n 2,\n [],\n [1.1],\n [1.1, 2.2],\n [1.1, 2.2, 3.3],\n ]\n\n assert ak.to_list(\n ak.from_iter([0, 1, 2, [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3]])\n ) == [0, 1, 2, [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3]]\n assert ak.to_list(\n ak.from_iter(\n [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n )\n ) == [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n assert ak.to_list(\n ak.from_json(\n '[0, 1, 2, [], \"zero\", [1.1], \"one\", [1.1, 2.2], \"two\", [1.1, 2.2, 3.3], \"three\"]'\n )\n ) == [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n assert (\n ak.to_json(\n ak.from_json(\n 
'[0,1,2,[],\"zero\",[1.1],\"one\",[1.1,2.2],\"two\",[1.1,2.2,3.3],\"three\"]'\n )\n )\n == '[0,1,2,[],\"zero\",[1.1],\"one\",[1.1,2.2],\"two\",[1.1,2.2,3.3],\"three\"]'\n )\n", "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\nto_list = ak._v2.operations.convert.to_list\n\n\ndef test():\n first = ak._v2.operations.convert.from_numpy(np.array([1, 2, 3]))\n deltas = ak._v2.highlevel.Array([[1, 2], [1, 2], [1, 2, 3]])\n assert np.hstack(\n (\n first[:, np.newaxis],\n ak._v2.operations.structure.fill_none(\n ak.operations.structure.pad_none(deltas, 3, axis=-1), 999\n ),\n )\n ).tolist() == [[1, 1, 2, 999], [2, 1, 2, 999], [3, 1, 2, 3]]\n", "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test():\n array = ak._v2.highlevel.Array(\n ak._v2.contents.RegularArray(\n ak._v2.contents.NumpyArray(np.r_[1, 2, 3, 4, 5, 6, 7, 8, 9]), 3\n )\n )\n condition = ak._v2.highlevel.Array(\n ak._v2.contents.NumpyArray(\n np.array([[True, True, True], [True, True, False], [True, False, True]])\n )\n )\n\n assert ak._v2.operations.structure.where(\n condition == 2, array, 2 * array\n ).tolist() == [\n [2, 4, 6],\n [8, 10, 12],\n [14, 16, 18],\n ]\n", "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_fillna_empty_array():\n empty = ak.layout.EmptyArray()\n\n assert ak.to_list(empty) == []\n array = empty.rpad(5, 0)\n assert ak.to_list(array) == [None, None, None, None, None]\n value = ak.layout.NumpyArray(np.array([10]))\n assert ak.to_list(array.fillna(value)) == [10, 10, 10, 10, 10]\n\n\ndef test_fillna_numpy_array():\n content = ak.layout.NumpyArray(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]))\n array = content.rpad(3, 0)\n assert ak.to_list(array) == [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], None]\n value = ak.layout.NumpyArray(np.array([0]))\n assert ak.to_list(array.fillna(value)) == [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], 0]\n\n array = content.rpad(5, 1)\n assert ak.to_list(array) == [\n [1.1, 2.2, 3.3, None, None],\n [4.4, 5.5, 6.6, None, None],\n ]\n value = ak.layout.NumpyArray(np.array([0]))\n assert ak.to_list(array.fillna(value)) == [\n [1.1, 2.2, 3.3, 0, 0],\n [4.4, 5.5, 6.6, 0, 0],\n ]\n\n\n# def test_fillna_numpy_O_array():\n# pyobject_array = ak.layout.NumpyArray(np.array([1.1, 2.2, 3.3, None, 5.5, 6.6, None]))\n# assert ak.to_list(optarray.fillna(0)) == [1.1, 2.2, 3.3, 0, 5.5, 6.6, 0]\n\n\ndef test_fillna_regular_array():\n content = ak.layout.NumpyArray(\n np.array(\n [\n 2.1,\n 8.4,\n 7.4,\n 1.6,\n 2.2,\n 3.4,\n 6.2,\n 5.4,\n 1.5,\n 3.9,\n 3.8,\n 3.0,\n 8.5,\n 6.9,\n 4.3,\n 3.6,\n 6.7,\n 1.8,\n 3.2,\n ]\n )\n )\n index = ak.layout.Index64(\n np.array([13, 9, 13, 4, 8, 3, 15, -1, 16, 2, 8], dtype=np.int64)\n )\n indexedarray = ak.layout.IndexedOptionArray64(index, content)\n regarray = ak.layout.RegularArray(indexedarray, 3, zeros_length=0)\n\n assert ak.to_list(regarray) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7]]\n value = ak.layout.NumpyArray(np.array([666]))\n assert ak.to_list(regarray.fillna(value)) == [\n [6.9, 3.9, 6.9],\n [2.2, 1.5, 1.6],\n [3.6, 666, 6.7],\n ]\n\n\ndef test_fillna_listarray_array():\n content = ak.layout.NumpyArray(\n np.array([0.0, 1.1, 
2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n starts = ak.layout.Index64(np.array([0, 3, 4, 5, 8]))\n stops = ak.layout.Index64(np.array([3, 3, 6, 8, 9]))\n listarray = ak.layout.ListArray64(starts, stops, content)\n\n assert ak.to_list(listarray) == [\n [0.0, 1.1, 2.2],\n [],\n [4.4, 5.5],\n [5.5, 6.6, 7.7],\n [8.8],\n ]\n value = ak.layout.NumpyArray(np.array([55]))\n assert ak.to_list(listarray.fillna(value)) == [\n [0.0, 1.1, 2.2],\n [],\n [4.4, 5.5],\n [5.5, 6.6, 7.7],\n [8.8],\n ]\n\n\ndef test_fillna_unionarray():\n content1 = ak.from_iter([[], [1.1], [2.2, 2.2]], highlevel=False)\n content2 = ak.from_iter([[2, 2], [1], []], highlevel=False)\n tags = ak.layout.Index8(np.array([0, 1, 0, 1, 0, 1], dtype=np.int8))\n index = ak.layout.Index64(np.array([0, 0, 1, 1, 2, 2], dtype=np.int64))\n array = ak.layout.UnionArray8_64(tags, index, [content1, content2])\n assert ak.to_list(array) == [[], [2, 2], [1.1], [1], [2.2, 2.2], []]\n\n padded_array = array.rpad(2, 1)\n assert ak.to_list(padded_array) == [\n [None, None],\n [2, 2],\n [1.1, None],\n [1, None],\n [2.2, 2.2],\n [None, None],\n ]\n value = ak.layout.NumpyArray(np.array([777]))\n assert ak.to_list(padded_array.fillna(value)) == [\n [777, 777],\n [2, 2],\n [1.1, 777],\n [1, 777],\n [2.2, 2.2],\n [777, 777],\n ]\n\n\ndef test_highlevel():\n array = ak.Array([[1.1, 2.2, None, 3.3], [], [4.4, None, 5.5]])\n assert ak.to_list(ak.fill_none(array, 999, axis=1)) == [\n [1.1, 2.2, 999, 3.3],\n [],\n [4.4, 999, 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, [1, 2, 3], axis=1)) == [\n [1.1, 2.2, [1, 2, 3], 3.3],\n [],\n [4.4, [1, 2, 3], 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, [], axis=1)) == [\n [1.1, 2.2, [], 3.3],\n [],\n [4.4, [], 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, {\"x\": 999}, axis=1)) == [\n [1.1, 2.2, {\"x\": 999}, 3.3],\n [],\n [4.4, {\"x\": 999}, 5.5],\n ]\n\n array = ak.Array([[1.1, 2.2, 3.3], None, [], None, [4.4, 5.5]])\n assert ak.to_list(ak.fill_none(array, 999, axis=0)) == [\n [1.1, 2.2, 3.3],\n 999,\n [],\n 999,\n [4.4, 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, [1, 2, 3], axis=0)) == [\n [1.1, 2.2, 3.3],\n [1, 2, 3],\n [],\n [1, 2, 3],\n [4.4, 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, {\"x\": 999}, axis=0)) == [\n [1.1, 2.2, 3.3],\n {\"x\": 999},\n [],\n {\"x\": 999},\n [4.4, 5.5],\n ]\n assert ak.to_list(ak.fill_none(array, [], axis=0)) == [\n [1.1, 2.2, 3.3],\n [],\n [],\n [],\n [4.4, 5.5],\n ]\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.dtype" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.array" ] ]
Kandy990125/PointClouds_cls
[ "23efc75e9f38a1a9c931711c64d3feffc3246424" ]
[ "PointCNN_Pytorch/train.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\n\nfrom PointCNN_Pytorch import provider\nfrom PointCNN_Pytorch.model.classifier import Classifier\nfrom dataset.ModelNet40 import get_data_loader\nfrom torch.autograd import Variable\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nblue = lambda x: '\\033[94m' + x + '\\033[0m'\n\nbatch_size = 32\nnum_epochs = 10\ntrain_loader = get_data_loader(train=True, batch_size=batch_size)\ntest_loader = get_data_loader(train=False, batch_size=batch_size)\nprint(\"----------already load datasets----------\")\nmodel = Classifier(NUM_CLASS=40).cuda()\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\nloss_fn = nn.CrossEntropyLoss()\nres = []\nfor epoch in range(num_epochs):\n for x, label in train_loader:\n # x = x.permute(0, 2, 1).to(device)\n # print(x)\n label = label.to(device)\n label = Variable(label, requires_grad=False).cuda()\n\n rotated_data = provider.rotate_point_cloud(x)\n jittered_data = provider.jitter_point_cloud(rotated_data) # P_Sampled\n P_sampled = jittered_data\n F_sampled = np.zeros((batch_size, 2048, 0))\n optimizer.zero_grad()\n P_sampled = torch.from_numpy(P_sampled).float()\n P_sampled = Variable(P_sampled, requires_grad=False).cuda()\n\n out = model((P_sampled, P_sampled))\n loss = loss_fn(out, label)\n loss.backward()\n optimizer.step()\n pred_choice = out.data.max(1)[1]\n print(\"result:\"+str(pred_choice))\n correct = pred_choice.eq(label.data).cpu().sum()\n # print('[%d: %d/%d] train loss: %f accuracy: %f' %\n # (epoch, idx, num_batch, loss.item(), correct.item() / float(batch_size)))\n print('[%d] train loss: %f accuracy:%f' % (epoch, loss.item(), correct.item() / float(batch_size)))\n\n if (epoch+1) % 5 == 0:\n total_correct = 0\n total_testset = 0\n for x, label in train_loader:\n rotated_data = provider.rotate_point_cloud(x)\n jittered_data = provider.jitter_point_cloud(rotated_data) # P_Sampled\n P_sampled = jittered_data\n F_sampled = np.zeros((batch_size, 2048, 0))\n P_sampled = torch.from_numpy(P_sampled).float()\n P_sampled = Variable(P_sampled, requires_grad=False).cuda()\n label = label.to(device)\n label = Variable(label, requires_grad=False).cuda()\n out = model((P_sampled, P_sampled))\n pred_choice = out.data.max(1)[1]\n correct = pred_choice.eq(label.data).cpu().sum()\n total_correct += correct.item()\n total_testset += x.size()[0]\n print(\"The {} is {}.\\t Test accuracy {}\".format(blue('epoch'), epoch, total_correct / float(total_testset)))\n res.append(total_correct / float(total_testset))\n torch.save(model.state_dict, 'out/model_epoch%d.pth' % (epoch))\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.from_numpy", "torch.save", "torch.cuda.is_available", "numpy.zeros", "torch.autograd.Variable" ] ]
Bilal-A-Qureshi/Point-GNN
[ "401eba8f7afaaa811ee1d001f38405629071a42a" ]
[ "models/nms.py" ]
[ "\"\"\"This file defines nms functions to merge boxes\"\"\"\n\nimport time\n\nimport cv2\nimport numpy as np\nfrom shapely.geometry import Polygon\n\ndef boxes_3d_to_corners(boxes_3d):\n all_corners = []\n for box_3d in boxes_3d:\n x3d, y3d, z3d, l, h, w, yaw = box_3d\n R = np.array([[np.cos(yaw), 0, np.sin(yaw)],\n [0, 1, 0 ],\n [-np.sin(yaw), 0, np.cos(yaw)]]);\n corners = np.array([[ l/2, 0.0, w/2], # front up right\n [ l/2, 0.0, -w/2], # front up left\n [-l/2, 0.0, -w/2], # back up left\n [-l/2, 0.0, w/2], # back up right\n [ l/2, -h, w/2], # front down right\n [ l/2, -h, -w/2], # front down left\n [-l/2, -h, -w/2], # back down left\n [-l/2, -h, w/2]]) # back down right\n r_corners = corners.dot(np.transpose(R))\n cam_points_xyz = r_corners+np.array([x3d, y3d, z3d])\n all_corners.append(cam_points_xyz)\n return np.array(all_corners)\n\ndef overlapped_boxes_3d(single_box, box_list):\n x0_max, y0_max, z0_max = np.max(single_box, axis=0)\n x0_min, y0_min, z0_min = np.min(single_box, axis=0)\n overlap = np.zeros(len(box_list))\n for i, box in enumerate(box_list):\n x_max, y_max, z_max = np.max(box, axis=0)\n x_min, y_min, z_min = np.min(box, axis=0)\n if x0_max < x_min or x0_min > x_max:\n overlap[i] = 0\n continue\n if y0_max < y_min or y0_min > y_max:\n overlap[i] = 0\n continue\n if z0_max < z_min or z0_min > z_max:\n overlap[i] = 0\n continue\n x_draw_min = min(x0_min, x_min)\n x_draw_max = max(x0_max, x_max)\n z_draw_min = min(z0_min, z_min)\n z_draw_max = max(z0_max, z_max)\n offset = np.array([x_draw_min, z_draw_min])\n buf1 = np.zeros((z_draw_max-z_draw_min, x_draw_max-x_draw_min),\n dtype=np.int32)\n buf2 = np.zeros_like(buf1)\n cv2.fillPoly(buf1, [single_box[:4, [0,2]]-offset], color=1)\n cv2.fillPoly(buf2, [box[:4, [0,2]]-offset], color=1)\n shared_area = cv2.countNonZero(buf1*buf2)\n area1 = cv2.countNonZero(buf1)\n area2 = cv2.countNonZero(buf2)\n shared_y = min(y_max, y0_max) - max(y_min, y0_min)\n intersection = shared_y * shared_area\n union = (y_max-y_min) * area2 + (y0_max-y0_min) * area1\n overlap[i] = np.float32(intersection) / (union - intersection)\n return overlap\n\ndef overlapped_boxes_3d_fast_poly(single_box, box_list):\n single_box_max_corner = np.max(single_box, axis=0)\n single_box_min_corner = np.min(single_box, axis=0)\n x0_max, y0_max, z0_max = single_box_max_corner\n x0_min, y0_min, z0_min = single_box_min_corner\n max_corner = np.max(box_list, axis=1)\n min_corner = np.min(box_list, axis=1)\n overlap = np.zeros(len(box_list))\n non_overlap_mask = np.logical_or(single_box_max_corner < min_corner,\n single_box_min_corner > max_corner)\n non_overlap_mask = np.any(non_overlap_mask, axis=1)\n p1 = Polygon(single_box[:4, [0,2]])\n area1 = p1.area\n for i in range(len(box_list)):\n if not non_overlap_mask[i]:\n x_max, y_max, z_max = max_corner[i]\n x_min, y_min, z_min = min_corner[i]\n p2 = Polygon(box_list[i][:4, [0,2]])\n shared_area = p1.intersection(p2).area\n area2 = p2.area\n shared_y = min(y_max, y0_max) - max(y_min, y0_min)\n intersection = shared_y * shared_area\n union = (y_max-y_min) * area2 + (y0_max-y0_min) * area1\n overlap[i] = np.float32(intersection) / (union - intersection)\n return overlap\n\ndef bboxes_sort(classes, scores, bboxes, top_k=400, attributes=None):\n \"\"\"Sort bounding boxes by decreasing order and keep only the top_k\n \"\"\"\n idxes = np.argsort(-scores)\n classes = classes[idxes]\n scores = scores[idxes]\n bboxes = bboxes[idxes]\n if attributes is not None:\n attributes = attributes[idxes]\n if top_k > 0:\n if len(idxes) > 
top_k:\n classes = classes[:top_k]\n scores = scores[:top_k]\n bboxes = bboxes[:top_k]\n if attributes is not None:\n attributes = attributes[:top_k]\n return classes, scores, bboxes, attributes\n\ndef bboxes_nms(classes, scores, bboxes, nms_threshold=0.45,\n overlapped_fn=overlapped_boxes_3d, appr_factor=10.0, attributes=None):\n \"\"\"Apply non-maximum selection to bounding boxes.\n \"\"\"\n boxes_corners = boxes_3d_to_corners(bboxes)\n # convert to pixels\n boxes_corners = np.int32(boxes_corners*appr_factor)\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size-1):\n if keep_bboxes[i]:\n # Computer overlap with bboxes which are following.\n overlap = overlapped_fn(boxes_corners[i], boxes_corners[(i+1):])\n # Overlap threshold for keeping + checking part of the same class\n keep_overlap = np.logical_or(\n overlap <= nms_threshold, classes[(i+1):] != classes[i])\n keep_bboxes[(i+1):] = np.logical_and(\n keep_bboxes[(i+1):], keep_overlap)##\n idxes = np.where(keep_bboxes)\n classes = classes[idxes]\n scores = scores[idxes]\n bboxes = bboxes[idxes]\n if attributes is not None:\n attributes = attributes[idxes]\n return classes, scores, bboxes, attributes\n\ndef bboxes_nms_uncertainty(classes, scores, bboxes, scores_threshold=0.25,\n nms_threshold=0.45, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n attributes=None):\n \"\"\"Apply non-maximum selection to bounding boxes.\n \"\"\"\n boxes_corners = boxes_3d_to_corners(bboxes)\n # boxes_corners = bboxes\n # convert to pixels\n # boxes_corners = np.int32(boxes_corners*appr_factor)\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size-1):\n if keep_bboxes[i]:\n # Only compute on the rest of bboxes\n valid = keep_bboxes[(i+1):]\n # Computer overlap with bboxes which are following.\n overlap = overlapped_fn(\n boxes_corners[i], boxes_corners[(i+1):][valid])\n # Overlap threshold for keeping + checking part of the same class\n remove_overlap = np.logical_and(\n overlap > nms_threshold, classes[(i+1):][valid] == classes[i])\n overlaped_bboxes = np.concatenate(\n [bboxes[(i+1):][valid][remove_overlap], bboxes[[i]]], axis=0)\n boxes_mean = np.median(overlaped_bboxes, axis=0)\n bboxes[i][:] = boxes_mean[:]\n boxes_corners_mean = boxes_3d_to_corners(\n np.expand_dims(boxes_mean, axis=0))\n boxes_mean_overlap = overlapped_fn(boxes_corners_mean[0],\n boxes_corners[(i+1):][valid][remove_overlap])\n scores[i] += np.sum(\n scores[(i+1):][valid][remove_overlap]*boxes_mean_overlap)\n keep_bboxes[(i+1):][valid] = np.logical_not(remove_overlap)##\n idxes = np.where(keep_bboxes)\n classes = classes[idxes]\n scores = scores[idxes]\n bboxes = bboxes[idxes]\n if attributes is not None:\n attributes = attributes[idxes]\n return classes, scores, bboxes, attributes\n\ndef bboxes_nms_merge_only(classes, scores, bboxes, scores_threshold=0.25,\n nms_threshold=0.45, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n attributes=None):\n \"\"\"Apply non-maximum selection to bounding boxes.\n \"\"\"\n boxes_corners = boxes_3d_to_corners(bboxes)\n # convert to pixels\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size-1):\n if keep_bboxes[i]:\n # Only compute on the rest of bboxes\n valid = keep_bboxes[(i+1):]\n # Computer overlap with bboxes which are following.\n overlap = overlapped_fn(boxes_corners[i],\n boxes_corners[(i+1):][valid])\n # Overlap threshold for keeping + checking part of the same class\n remove_overlap = np.logical_and(overlap > nms_threshold,\n 
classes[(i+1):][valid] == classes[i])\n overlaped_bboxes = np.concatenate(\n [bboxes[(i+1):][valid][remove_overlap], bboxes[[i]]], axis=0)\n boxes_mean = np.median(overlaped_bboxes, axis=0)\n bboxes[i][:] = boxes_mean[:]\n keep_bboxes[(i+1):][valid] = np.logical_not(remove_overlap)##\n\n idxes = np.where(keep_bboxes)\n classes = classes[idxes]\n scores = scores[idxes]\n bboxes = bboxes[idxes]\n if attributes is not None:\n attributes = attributes[idxes]\n return classes, scores, bboxes, attributes\n\ndef bboxes_nms_score_only(classes, scores, bboxes, scores_threshold=0.25,\n nms_threshold=0.45, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n attributes=None):\n \"\"\"Apply non-maximum selection to bounding boxes.\n \"\"\"\n boxes_corners = boxes_3d_to_corners(bboxes)\n # convert to pixels\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size-1):\n if keep_bboxes[i]:\n # Only compute on the rest of bboxes\n valid = keep_bboxes[(i+1):]\n # Computer overlap with bboxes which are following.\n overlap = overlapped_fn(boxes_corners[i],\n boxes_corners[(i+1):][valid])\n # Overlap threshold for keeping + checking part of the same class\n remove_overlap = np.logical_and(overlap > nms_threshold,\n classes[(i+1):][valid] == classes[i])\n overlaped_bboxes = np.concatenate(\n [bboxes[(i+1):][valid][remove_overlap], bboxes[[i]]], axis=0)\n boxes_mean = bboxes[i][:]\n bboxes[i][:] = boxes_mean[:]\n boxes_corners_mean = boxes_3d_to_corners(\n np.expand_dims(boxes_mean, axis=0))\n boxes_mean_overlap = overlapped_fn(boxes_corners_mean[0],\n boxes_corners[(i+1):][valid][remove_overlap])\n scores[i] += np.sum(\n scores[(i+1):][valid][remove_overlap]*boxes_mean_overlap)\n keep_bboxes[(i+1):][valid] = np.logical_not(remove_overlap)##\n idxes = np.where(keep_bboxes)\n classes = classes[idxes]\n scores = scores[idxes]\n bboxes = bboxes[idxes]\n if attributes is not None:\n attributes = attributes[idxes]\n return classes, scores, bboxes, attributes\n\ndef nms_boxes_3d(class_labels, detection_boxes_3d, detection_scores,\n overlapped_thres=0.5, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n top_k=-1, attributes=None):\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_sort(\n class_labels, detection_scores, detection_boxes_3d, top_k=top_k,\n attributes=attributes)\n # nms\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_nms(\n class_labels, detection_scores, detection_boxes_3d,\n nms_threshold=overlapped_thres, overlapped_fn=overlapped_fn,\n appr_factor=appr_factor, attributes=attributes)\n return class_labels, detection_boxes_3d, detection_scores, attributes\n\ndef nms_boxes_3d_uncertainty(class_labels, detection_boxes_3d, detection_scores,\n overlapped_thres=0.5, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n top_k=-1, attributes=None):\n\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_sort(\n class_labels, detection_scores, detection_boxes_3d, top_k=top_k,\n attributes=attributes)\n # nms\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_nms_uncertainty(\n class_labels, detection_scores, detection_boxes_3d,\n nms_threshold=overlapped_thres, overlapped_fn=overlapped_fn,\n appr_factor=appr_factor, attributes=attributes)\n return class_labels, detection_boxes_3d, detection_scores, attributes\n\ndef nms_boxes_3d_merge_only(class_labels, detection_boxes_3d, detection_scores,\n overlapped_thres=0.5, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n 
top_k=-1, attributes=None):\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_sort(\n class_labels, detection_scores, detection_boxes_3d, top_k=top_k,\n attributes=attributes)\n # nms\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_nms_merge_only(\n class_labels, detection_scores, detection_boxes_3d,\n nms_threshold=overlapped_thres, overlapped_fn=overlapped_fn,\n appr_factor=appr_factor, attributes=attributes)\n return class_labels, detection_boxes_3d, detection_scores, attributes\n\ndef nms_boxes_3d_score_only(class_labels, detection_boxes_3d, detection_scores,\n overlapped_thres=0.5, overlapped_fn=overlapped_boxes_3d, appr_factor=10.0,\n top_k=-1, attributes=None):\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_sort(\n class_labels, detection_scores, detection_boxes_3d, top_k=top_k,\n attributes=attributes)\n # nms\n class_labels, detection_scores, detection_boxes_3d, attributes = \\\n bboxes_nms_score_only(\n class_labels, detection_scores, detection_boxes_3d,\n nms_threshold=overlapped_thres, overlapped_fn=overlapped_fn,\n appr_factor=appr_factor, attributes=attributes)\n return class_labels, detection_boxes_3d, detection_scores, attributes\n" ]
[ [ "numpy.expand_dims", "numpy.concatenate", "numpy.max", "numpy.zeros_like", "numpy.any", "numpy.where", "numpy.sin", "numpy.float32", "numpy.zeros", "numpy.logical_not", "numpy.min", "numpy.median", "numpy.logical_or", "numpy.transpose", "numpy.argsort", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.int32", "numpy.cos", "numpy.ones" ] ]
anaruse/dask
[ "72304a94c98ace592f01df91e3d9e89febda307c" ]
[ "dask/dataframe/io/tests/test_parquet.py" ]
[ "import math\nimport glob\nimport os\nimport sys\nimport warnings\nfrom decimal import Decimal\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.multiprocessing\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\nfrom dask.dataframe.io.parquet.utils import _parse_pandas_metadata\nfrom dask.dataframe.optimize import optimize_read_parquet_getitem\nfrom dask.dataframe.io.parquet.core import ParquetSubgraph\nfrom dask.utils import natural_sort_key, parse_bytes\n\n\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = False\n\n\ntry:\n import pyarrow as pa\nexcept ImportError:\n check_pa_divs = pa = False\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = False\n\n\nSKIP_FASTPARQUET = not fastparquet\nSKIP_FASTPARQUET_REASON = \"fastparquet not found\"\nFASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason=SKIP_FASTPARQUET_REASON)\n\nif pq and pa.__version__ < LooseVersion(\"0.13.1\"):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = \"pyarrow >= 0.13.1 required for parquet\"\nelse:\n if (\n sys.platform == \"win32\"\n and pa\n and (\n (pa.__version__ == LooseVersion(\"0.16.0\"))\n or (pa.__version__ == LooseVersion(\"2.0.0\"))\n )\n ):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = (\n \"skipping pyarrow 0.16.0 and 2.0.0 on windows: \"\n \"https://github.com/dask/dask/issues/6093\"\n \"|https://github.com/dask/dask/issues/6754\"\n )\n else:\n SKIP_PYARROW = not pq\n SKIP_PYARROW_REASON = \"pyarrow not found\"\nPYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)\n\nif pa and pa.__version__ < LooseVersion(\"1.0.0\"):\n SKIP_PYARROW_DS = True\n SKIP_PYARROW_DS_REASON = \"pyarrow >= 1.0.0 required for pyarrow dataset API\"\nelse:\n SKIP_PYARROW_DS = SKIP_PYARROW\n SKIP_PYARROW_DS_REASON = \"pyarrow not found\"\nPYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)\n\n\ndef check_fastparquet():\n if SKIP_FASTPARQUET:\n pytest.skip(SKIP_FASTPARQUET_REASON)\n\n\ndef check_pyarrow():\n if SKIP_PYARROW:\n pytest.skip(SKIP_PYARROW_REASON)\n\n\ndef check_engine():\n if SKIP_FASTPARQUET and SKIP_PYARROW:\n pytest.skip(\"No parquet engine (fastparquet or pyarrow) found\")\n\n\nnrows = 40\nnpartitions = 15\ndf = pd.DataFrame(\n {\n \"x\": [i * 7 % 5 for i in range(nrows)], # Not sorted\n \"y\": [i * 2.5 for i in range(nrows)], # Sorted\n },\n index=pd.Index([10 * i for i in range(nrows)], name=\"myindex\"),\n)\n\nddf = dd.from_pandas(df, npartitions=npartitions)\n\n\[email protected](\n params=[\n pytest.param(\"fastparquet\", marks=FASTPARQUET_MARK),\n pytest.param(\"pyarrow-legacy\", marks=PYARROW_MARK),\n pytest.param(\"pyarrow-dataset\", marks=PYARROW_DS_MARK),\n ]\n)\ndef engine(request):\n return request.param\n\n\ndef write_read_engines(**kwargs):\n \"\"\"Product of both engines for write/read:\n\n To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,\n or `mark_engine=reason` to apply to all parameters with that engine.\"\"\"\n backends = {\"pyarrow-dataset\", \"pyarrow-legacy\", \"fastparquet\"}\n marks = {(w, r): [] for w in backends for r in backends}\n\n # Skip if uninstalled\n for name, skip, reason in [\n (\"fastparquet\", SKIP_FASTPARQUET, SKIP_FASTPARQUET_REASON),\n (\"pyarrow-legacy\", SKIP_PYARROW_DS, SKIP_PYARROW_DS_REASON),\n (\"pyarrow-dataset\", SKIP_PYARROW, SKIP_PYARROW_REASON),\n ]:\n if skip:\n val = pytest.mark.skip(reason=reason)\n for k in marks:\n if name in k:\n 
marks[k].append(val)\n\n # Custom marks\n for kw, val in kwargs.items():\n kind, rest = kw.split(\"_\", 1)\n key = tuple(rest.split(\"_\"))\n if (\n kind not in (\"xfail\", \"skip\")\n or len(key) > 2\n or set(key).difference(backends)\n ):\n raise ValueError(\"unknown keyword %r\" % kw)\n val = getattr(pytest.mark, kind)(reason=val)\n if len(key) == 2:\n marks[key].append(val)\n else:\n for k in marks:\n if key in k:\n marks[k].append(val)\n\n return pytest.mark.parametrize(\n (\"write_engine\", \"read_engine\"),\n [pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],\n )\n\n\npyarrow_fastparquet_msg = \"fastparquet fails reading pyarrow written directories\"\nwrite_read_engines_xfail = write_read_engines(\n **{\n \"xfail_pyarrow-dataset_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_pyarrow-legacy_fastparquet\": pyarrow_fastparquet_msg,\n }\n)\n\nfp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\nfp_pandas_xfail = write_read_engines(\n **{\n \"xfail_fastparquet_pyarrow-dataset\": fp_pandas_msg,\n \"xfail_fastparquet_pyarrow-legacy\": fp_pandas_msg,\n }\n)\n\n\n@write_read_engines()\ndef test_local(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n df.to_parquet(tmp, write_index=False, engine=write_engine)\n\n files = os.listdir(tmp)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(tmp, index=False, engine=read_engine)\n\n assert len(df2.divisions) > 1\n\n out = df2.compute(scheduler=\"sync\").reset_index()\n\n for column in df.columns:\n assert (data[column] == out[column]).all()\n\n\[email protected](\"index\", [False, True])\n@write_read_engines_xfail\ndef test_empty(tmpdir, write_engine, read_engine, index):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})[:0]\n if index:\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(fn, write_index=index, engine=write_engine)\n read_df = dd.read_parquet(fn, engine=read_engine)\n assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_simple(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n read_df = dd.read_parquet(fn, index=[\"a\"], engine=read_engine)\n assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_delayed_no_metadata(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(\n fn, engine=write_engine, compute=False, write_metadata_file=False\n ).compute()\n files = os.listdir(fn)\n assert \"_metadata\" not in files\n # Fastparquet doesn't currently handle a directory without \"_metadata\"\n read_df = dd.read_parquet(\n os.path.join(fn, \"*.parquet\"),\n index=[\"a\"],\n engine=read_engine,\n gather_statistics=True,\n )\n assert_eq(ddf, read_df)\n\n\n@write_read_engines()\ndef test_read_glob(tmpdir, write_engine, 
read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"),\n engine=read_engine,\n index=\"myindex\", # Must specify index without _metadata\n gather_statistics=True,\n )\n assert_eq(ddf, ddf2)\n\n\n@write_read_engines()\ndef test_read_list(tmpdir, write_engine, read_engine):\n if write_engine == read_engine == \"fastparquet\" and os.name == \"nt\":\n # fastparquet or dask is not normalizing filepaths correctly on\n # windows.\n pytest.skip(\"filepath bug.\")\n\n tmpdir = str(tmpdir)\n ddf.to_parquet(tmpdir, engine=write_engine)\n files = sorted(\n [\n os.path.join(tmpdir, f)\n for f in os.listdir(tmpdir)\n if not f.endswith(\"_metadata\")\n ],\n key=natural_sort_key,\n )\n\n ddf2 = dd.read_parquet(\n files, engine=read_engine, index=\"myindex\", gather_statistics=True\n )\n assert_eq(ddf, ddf2)\n\n\n@write_read_engines()\ndef test_columns_auto_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # XFAIL, auto index selection not longer supported (for simplicity)\n # ### Emtpy columns ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, auto select index ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[\"x\"], engine=read_engine), ddf[[\"x\"]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[\"x\"], engine=read_engine, gather_statistics=False),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_columns_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # With Index\n # ----------\n # ### Emtpy columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, index=\"myindex\"), ddf[[]]\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn, columns=[], engine=read_engine, index=\"myindex\", gather_statistics=False\n ),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\"], engine=read_engine),\n ddf[[\"x\"]],\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Two columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\", \"y\"], engine=read_engine),\n ddf,\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf.clear_divisions(),\n check_divisions=True,\n )\n\n\ndef test_nonsense_column(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n with pytest.raises((ValueError, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"], engine=engine)\n with pytest.raises((Exception, KeyError)):\n 
dd.read_parquet(fn, columns=[\"nonesense\"] + list(ddf.columns), engine=engine)\n\n\n@write_read_engines()\ndef test_columns_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = ddf.reset_index()\n\n # No Index\n # --------\n # All columns, none as index\n assert_eq(\n dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),\n ddf2,\n check_index=False,\n check_divisions=True,\n )\n\n # Two columns, none as index\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"x\", \"y\"]],\n check_index=False,\n check_divisions=True,\n )\n\n # One column and one index, all as columns\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"myindex\", \"x\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"myindex\", \"x\"]],\n check_index=False,\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_gather_statistics_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine, write_index=False)\n\n df = dd.read_parquet(fn, engine=read_engine, index=False)\n assert df.index.name is None\n assert not df.known_divisions\n\n\ndef test_columns_index_with_multi_index(tmpdir, engine):\n fn = os.path.join(str(tmpdir), \"test.parquet\")\n index = pd.MultiIndex.from_arrays(\n [np.arange(10), np.arange(10) + 1], names=[\"x0\", \"x1\"]\n )\n df = pd.DataFrame(np.random.randn(10, 2), columns=[\"a\", \"b\"], index=index)\n df2 = df.reset_index(drop=False)\n\n if engine == \"fastparquet\":\n fastparquet.write(fn, df.reset_index(), write_index=False)\n\n # fastparquet doesn't support multi-index\n with pytest.raises(ValueError):\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n\n else:\n pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)\n\n # Pyarrow supports multi-index reads\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n assert_eq(ddf, df)\n\n d = dd.read_parquet(fn, columns=\"a\", engine=engine, index=index.names)\n assert_eq(d, df[\"a\"])\n\n d = dd.read_parquet(fn, index=[\"a\", \"b\"], columns=[\"x0\", \"x1\"], engine=engine)\n assert_eq(d, df2.set_index([\"a\", \"b\"])[[\"x0\", \"x1\"]])\n\n # Just index\n d = dd.read_parquet(fn, index=False, engine=engine)\n assert_eq(d, df2)\n\n d = dd.read_parquet(fn, columns=[\"b\"], index=[\"a\"], engine=engine)\n assert_eq(d, df2.set_index(\"a\")[[\"b\"]])\n\n d = dd.read_parquet(fn, columns=[\"a\", \"b\"], index=[\"x0\"], engine=engine)\n assert_eq(d, df2.set_index(\"x0\")[[\"a\", \"b\"]])\n\n # Just columns\n d = dd.read_parquet(fn, columns=[\"x0\", \"a\"], index=[\"x1\"], engine=engine)\n assert_eq(d, df2.set_index(\"x1\")[[\"x0\", \"a\"]])\n\n # Both index and columns\n d = dd.read_parquet(fn, index=False, columns=[\"x0\", \"b\"], engine=engine)\n assert_eq(d, df2[[\"x0\", \"b\"]])\n\n for index in [\"x1\", \"b\"]:\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n assert_eq(d, df2.set_index(index)[[\"x0\", \"a\"]])\n\n # Columns and index intersect\n for index in [\"a\", \"x0\"]:\n with pytest.raises(ValueError):\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n\n # Series output\n for ind, col, sol_df in [\n (\"x1\", \"x0\", df2.set_index(\"x1\")),\n (False, \"b\", df2),\n (False, \"x0\", df2[[\"x0\"]]),\n (\"a\", \"x0\", df2.set_index(\"a\")[[\"x0\"]]),\n (\"a\", \"b\", df2.set_index(\"a\")),\n 
]:\n d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)\n assert_eq(d, sol_df[col])\n\n\n@write_read_engines()\ndef test_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df, ddf2, check_index=False)\n\n\ndef test_read_series(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, columns=[\"x\"], index=\"myindex\", engine=engine)\n assert_eq(ddf[[\"x\"]], ddf2)\n\n ddf2 = dd.read_parquet(fn, columns=\"x\", index=\"myindex\", engine=engine)\n assert_eq(ddf.x, ddf2)\n\n\ndef test_names(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n\n def read(fn, **kwargs):\n return dd.read_parquet(fn, engine=engine, **kwargs)\n\n assert set(read(fn).dask) == set(read(fn).dask)\n\n assert set(read(fn).dask) != set(read(fn, columns=[\"x\"]).dask)\n\n assert set(read(fn, columns=(\"x\",)).dask) == set(read(fn, columns=[\"x\"]).dask)\n\n\n@write_read_engines()\ndef test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):\n fn = str(tmpdir.join(\"test.parquet\"))\n dfp = df.copy()\n dfp.index.name = \"index\"\n dfp.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n ddf = dd.read_parquet(fn, index=\"index\", engine=read_engine)\n assert_eq(dfp, ddf)\n\n\n@write_read_engines()\ndef test_categorical(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 100}, dtype=\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n ddf2 = dd.read_parquet(tmp, categories=\"x\", engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2 = dd.read_parquet(tmp, categories=[\"x\"], engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n # autocat\n if read_engine == \"fastparquet\":\n ddf2 = dd.read_parquet(tmp, engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2.loc[:1000].compute()\n assert assert_eq(df, ddf2)\n\n # dereference cats\n ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)\n\n ddf2.loc[:1000].compute()\n assert (df.x == ddf2.x).all()\n\n\ndef test_append(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n check_fastparquet()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n ddf2.to_parquet(tmp, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, engine=engine)\n assert_eq(df, ddf3)\n\n\ndef test_append_create(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp_path = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], 
size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp_path, append=True, engine=engine)\n ddf2.to_parquet(tmp_path, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, ddf3)\n\n\ndef test_append_with_partition(tmpdir, engine):\n # check_fastparquet()\n tmp = str(tmpdir)\n df0 = pd.DataFrame(\n {\n \"lat\": np.arange(0, 10),\n \"lon\": np.arange(10, 20),\n \"value\": np.arange(100, 110),\n }\n )\n df0.index.name = \"index\"\n df1 = pd.DataFrame(\n {\n \"lat\": np.arange(10, 20),\n \"lon\": np.arange(10, 20),\n \"value\": np.arange(120, 130),\n }\n )\n df1.index.name = \"index\"\n dd_df0 = dd.from_pandas(df0, npartitions=1)\n dd_df1 = dd.from_pandas(df1, npartitions=1)\n dd.to_parquet(dd_df0, tmp, partition_on=[\"lon\"], engine=engine)\n dd.to_parquet(\n dd_df1,\n tmp,\n partition_on=[\"lon\"],\n append=True,\n ignore_divisions=True,\n engine=engine,\n )\n\n out = dd.read_parquet(\n tmp, engine=engine, index=\"index\", gather_statistics=True\n ).compute()\n out[\"lon\"] = out.lon.astype(\"int\") # just to pass assert\n # sort required since partitioning breaks index order\n assert_eq(\n out.sort_values(\"value\"), pd.concat([df0, df1])[out.columns], check_index=False\n )\n\n\ndef test_partition_on_cats(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\[email protected](\"meta\", [False, True])\[email protected](\"stats\", [False, True])\ndef test_partition_on_cats_pyarrow(tmpdir, stats, meta):\n check_pyarrow()\n\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=\"pyarrow\", write_metadata_file=meta)\n df = dd.read_parquet(tmp, engine=\"pyarrow\", gather_statistics=stats)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\ndef test_partition_on_cats_2(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\", \"c\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n\n df = dd.read_parquet(tmp, columns=[\"a\", \"c\"], engine=engine)\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n assert \"b\" not in df.columns\n assert_eq(df, df.compute())\n df = dd.read_parquet(tmp, index=\"c\", engine=engine)\n assert set(df.index.categories) == {\"x\", \"y\", \"z\"}\n assert \"c\" not in df.columns\n # series\n df = dd.read_parquet(tmp, columns=\"b\", engine=engine)\n assert set(df.cat.categories) == {\"x\", \"y\", \"z\"}\n\n\ndef test_append_wo_index(tmpdir, engine):\n \"\"\"Test append with write_index=False.\"\"\"\n tmp = 
str(tmpdir.join(\"tmp1.parquet\"))\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n assert \"Appended columns\" in str(excinfo.value)\n\n tmp = str(tmpdir.join(\"tmp2.parquet\"))\n ddf1.to_parquet(tmp, write_index=False, engine=engine)\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, index=\"f\", engine=engine)\n assert_eq(df.set_index(\"f\"), ddf3)\n\n\ndef test_append_overlapping_divisions(tmpdir, engine):\n \"\"\"Test raising of error when divisions overlapping.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended divisions\" in str(excinfo.value)\n\n ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)\n\n\ndef test_append_different_columns(tmpdir, engine):\n \"\"\"Test raising of error when non equal columns.\"\"\"\n tmp = str(tmpdir)\n df1 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int32)})\n df2 = pd.DataFrame({\"i64\": np.arange(100, dtype=np.int64)})\n df3 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int64)})\n\n ddf1 = dd.from_pandas(df1, chunksize=2)\n ddf2 = dd.from_pandas(df2, chunksize=2)\n ddf3 = dd.from_pandas(df3, chunksize=2)\n\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended columns\" in str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n ddf3.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended dtypes\" in str(excinfo.value)\n\n\n@write_read_engines_xfail\ndef test_ordering(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [10, 20, 30], \"c\": [100, 200, 300]},\n index=pd.Index([-1, -2, -3], name=\"myindex\"),\n columns=[\"c\", \"a\", \"b\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n if read_engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(tmp)\n assert pf.columns == [\"myindex\", \"c\", \"a\", \"b\"]\n\n ddf2 = dd.read_parquet(tmp, index=\"myindex\", engine=read_engine)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\ndef test_read_parquet_custom_columns(tmpdir, engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\"i32\": np.arange(1000, dtype=np.int32), \"f\": np.arange(1000, dtype=np.float64)}\n )\n df = dd.from_pandas(data, chunksize=50)\n df.to_parquet(tmp, engine=engine)\n\n df2 = dd.read_parquet(tmp, columns=[\"i32\", \"f\"], engine=engine)\n assert_eq(df[[\"i32\", \"f\"]], df2, check_index=False)\n\n fns = 
glob.glob(os.path.join(tmp, \"*.parquet\"))\n df2 = dd.read_parquet(fns, columns=[\"i32\"], engine=engine).compute()\n df2.sort_values(\"i32\", inplace=True)\n assert_eq(df[[\"i32\"]], df2, check_index=False, check_divisions=False)\n\n df3 = dd.read_parquet(tmp, columns=[\"f\", \"i32\"], engine=engine)\n assert_eq(df[[\"f\", \"i32\"]], df3, check_index=False)\n\n\[email protected](\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": [\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n marks=pytest.mark.xfail(\n reason=\"Parquet doesn't support nanosecond precision\"\n ),\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, UTC]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, CET]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n tmp = str(tmpdir)\n if df.index.name is None:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n oe = write_kwargs.pop(\"object_encoding\", None)\n if oe and engine == \"fastparquet\":\n dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)\n else:\n dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)\n ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)\n assert_eq(ddf, ddf2)\n\n\ndef test_categories(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": list(\"caaab\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf.y.astype(\"category\")\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, categories=[\"y\"], engine=engine)\n\n # Shouldn't need to specify categories explicitly\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf3, ddf2)\n\n with pytest.raises(NotImplementedError):\n ddf2.y.cat.categories\n assert set(ddf2.y.compute().cat.categories) == {\"a\", \"b\", \"c\"}\n cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()\n assert cats_set.tolist() == [\"a\", \"c\", \"a\", \"b\"]\n\n if engine == 
\"fastparquet\":\n assert_eq(ddf.y, ddf2.y, check_names=False)\n with pytest.raises(TypeError):\n # attempt to load as category that which is not so encoded\n ddf2 = dd.read_parquet(fn, categories=[\"x\"], engine=engine).compute()\n\n with pytest.raises((ValueError, FutureWarning)):\n # attempt to load as category unknown column\n ddf2 = dd.read_parquet(fn, categories=[\"foo\"], engine=engine)\n\n\ndef test_categories_unnamed_index(tmpdir, engine):\n # Check that we can handle an unnamed categorical index\n # https://github.com/dask/dask/issues/6885\n\n if engine.startswith(\"pyarrow\") and pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n tmpdir = str(tmpdir)\n\n df = pd.DataFrame(\n data={\"A\": [1, 2, 3], \"B\": [\"a\", \"a\", \"b\"]}, index=[\"x\", \"y\", \"y\"]\n )\n ddf = dd.from_pandas(df, npartitions=1)\n ddf = ddf.categorize(columns=[\"B\"])\n\n ddf.to_parquet(tmpdir, engine=engine)\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf.index, ddf2.index, check_divisions=False)\n\n\ndef test_empty_partition(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf[ddf.a <= 5]\n ddf2.to_parquet(fn, engine=engine)\n\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert ddf3.npartitions < 5\n sol = ddf2.compute()\n assert_eq(sol, ddf3, check_names=False, check_index=False)\n\n\ndef test_timestamp_index(tmpdir, engine):\n fn = str(tmpdir)\n df = dd._compat.makeTimeDataFrame()\n df.index.name = \"foo\"\n ddf = dd.from_pandas(df, npartitions=5)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf, ddf2)\n\n\ndef test_to_parquet_default_writes_nulls(tmpdir):\n check_fastparquet()\n check_pyarrow()\n fn = str(tmpdir.join(\"test.parquet\"))\n\n df = pd.DataFrame({\"c1\": [1.0, np.nan, 2, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.to_parquet(fn)\n table = pq.read_table(fn)\n assert table[1].null_count == 2\n\n\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):\n check_pyarrow()\n\n df = pd.DataFrame(\n {\"partition_column\": [0, 0, 1, 1], \"strings\": [\"a\", \"b\", None, None]}\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n # In order to allow pyarrow to write an inconsistent schema,\n # we need to avoid writing the _metadata file (will fail >0.17.1)\n # and need to avoid schema inference (i.e. 
use `schema=None`)\n ddf.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n partition_on=[\"partition_column\"],\n write_metadata_file=False,\n schema=None,\n )\n\n # Test that schema is not validated by default\n # (shouldn't raise error with legacy dataset)\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n ).compute()\n\n # Test that read fails when validate_schema=True\n # Note: This fails differently for pyarrow.dataset api\n with pytest.raises(ValueError) as e_info:\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n dataset={\"validate_schema\": True},\n ).compute()\n assert e_info.message.contains(\"ValueError: Schema in partition\")\n assert e_info.message.contains(\"was different\")\n\n\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(\n tmpdir,\n):\n check_pyarrow()\n\n # Data types to test: strings, arrays, ints, timezone aware timestamps\n in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]\n out_arrays = [[0, 1, 2], [3, 4], None, None]\n in_strings = [\"a\", \"b\", np.nan, np.nan]\n out_strings = [\"a\", \"b\", None, None]\n tstamp = pd.Timestamp(1513393355, unit=\"s\")\n in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]\n out_tstamps = [\n # Timestamps come out in numpy.datetime64 format\n tstamp.to_datetime64(),\n tstamp.to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n timezone = \"US/Eastern\"\n tz_tstamp = pd.Timestamp(1513393355, unit=\"s\", tz=timezone)\n in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]\n out_tz_tstamps = [\n # Timezones do not make it through a write-read cycle.\n tz_tstamp.tz_convert(None).to_datetime64(),\n tz_tstamp.tz_convert(None).to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n\n df = pd.DataFrame(\n {\n \"partition_column\": [0, 0, 1, 1],\n \"arrays\": in_arrays,\n \"strings\": in_strings,\n \"tstamps\": in_tstamps,\n \"tz_tstamps\": in_tz_tstamps,\n }\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n schema = pa.schema(\n [\n (\"arrays\", pa.list_(pa.int64())),\n (\"strings\", pa.string()),\n (\"tstamps\", pa.timestamp(\"ns\")),\n (\"tz_tstamps\", pa.timestamp(\"ns\", timezone)),\n (\"partition_column\", pa.int64()),\n ]\n )\n ddf.to_parquet(\n str(tmpdir), engine=\"pyarrow\", partition_on=\"partition_column\", schema=schema\n )\n ddf_after_write = (\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\", gather_statistics=False)\n .compute()\n .reset_index(drop=True)\n )\n\n # Check array support\n arrays_after_write = ddf_after_write.arrays.values\n for i in range(len(df)):\n assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])\n\n # Check datetime support\n tstamps_after_write = ddf_after_write.tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tstamps_after_write[i]):\n assert np.isnat(out_tstamps[i])\n else:\n assert tstamps_after_write[i] == out_tstamps[i]\n\n # Check timezone aware datetime support\n tz_tstamps_after_write = ddf_after_write.tz_tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tz_tstamps_after_write[i]):\n assert np.isnat(out_tz_tstamps[i])\n else:\n assert tz_tstamps_after_write[i] == out_tz_tstamps[i]\n\n # Check string support\n assert np.array_equal(ddf_after_write.strings.values, out_strings)\n\n # Check partition column\n assert np.array_equal(ddf_after_write.partition_column, df.partition_column)\n\n\n@pytest.mark.parametrize(\"index\", [False, True])\n@pytest.mark.parametrize(\"schema\", [\"infer\", \"complex\"])\ndef 
test_pyarrow_schema_inference(tmpdir, index, engine, schema):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n if schema == \"complex\":\n schema = {\"index\": pa.string(), \"amount\": pa.int64()}\n\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"index\": [\"1\", \"2\", \"3\", \"2\", \"3\", \"1\", \"4\"],\n \"date\": pd.to_datetime(\n [\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-02\",\n \"2017-01-02\",\n \"2017-01-06\",\n \"2017-01-09\",\n ]\n ),\n \"amount\": [100, 200, 300, 400, 500, 600, 700],\n },\n index=range(7, 14),\n )\n if index:\n df = dd.from_pandas(df, npartitions=2).set_index(\"index\")\n else:\n df = dd.from_pandas(df, npartitions=2)\n\n df.to_parquet(tmpdir, engine=\"pyarrow\", schema=schema, compute=False).compute(\n scheduler=\"synchronous\"\n )\n df_out = dd.read_parquet(tmpdir, engine=engine)\n\n if index and engine == \"fastparquet\":\n # Fastparquet not handling divisions for\n # pyarrow-written dataset with string index\n assert_eq(df, df_out, check_divisions=False)\n else:\n assert_eq(df, df_out)\n\n\ndef test_partition_on(tmpdir, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n \"d\": np.arange(0, 100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n # Note #1: Cross-engine functionality is missing\n # Note #2: The index is not preserved in pyarrow when partition_on is used\n out = dd.read_parquet(\n tmpdir, engine=engine, index=False, gather_statistics=False\n ).compute()\n for val in df.a1.unique():\n assert set(df.b[df.a1 == val]) == set(out.b[out.a1 == val])\n\n # Now specify the columns and allow auto-index detection\n out = dd.read_parquet(tmpdir, engine=engine, columns=[\"b\", \"a2\"]).compute()\n for val in df.a2.unique():\n assert set(df.b[df.a2 == val]) == set(out.b[out.a2 == val])\n\n\ndef test_partition_on_duplicates(tmpdir, engine):\n # https://github.com/dask/dask/issues/6445\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"data\": np.random.random(size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n\n for _ in range(2):\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n\n out = dd.read_parquet(tmpdir, engine=engine).compute()\n\n assert len(df) == len(out)\n for root, dirs, files in os.walk(tmpdir):\n for file in files:\n assert file in (\n \"part.0.parquet\",\n \"part.1.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )\n\n\n@pytest.mark.parametrize(\"partition_on\", [\"aa\", [\"aa\"]])\ndef test_partition_on_string(tmpdir, partition_on):\n tmpdir = str(tmpdir)\n check_pyarrow()\n with dask.config.set(scheduler=\"single-threaded\"):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"aa\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"bb\": np.random.random(size=100),\n \"cc\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(\n tmpdir, partition_on=partition_on, write_index=False, engine=\"pyarrow\"\n )\n out = dd.read_parquet(\n tmpdir, index=False, gather_statistics=False, engine=\"pyarrow\"\n )\n out = out.compute()\n for 
val in df.aa.unique():\n assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])\n\n\n@write_read_engines()\ndef test_filters_categorical(tmpdir, write_engine, read_engine):\n tmpdir = str(tmpdir)\n cats = [\"2018-01-01\", \"2018-01-02\", \"2018-01-03\", \"2018-01-04\"]\n dftest = pd.DataFrame(\n {\n \"dummy\": [1, 1, 1, 1],\n \"DatePart\": pd.Categorical(cats, categories=cats, ordered=True),\n }\n )\n ddftest = dd.from_pandas(dftest, npartitions=4).set_index(\"dummy\")\n ddftest.to_parquet(tmpdir, partition_on=\"DatePart\", engine=write_engine)\n ddftest_read = dd.read_parquet(\n tmpdir,\n index=\"dummy\",\n engine=read_engine,\n filters=[((\"DatePart\", \"<=\", \"2018-01-02\"))],\n )\n assert len(ddftest_read) == 2\n\n\n@write_read_engines()\ndef test_filters(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n assert ddf.npartitions == 5\n\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n a = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \">\", 4)])\n assert a.npartitions == 3\n assert (a.x > 3).all().compute()\n\n b = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\")])\n assert b.npartitions == 1\n assert (b.y == \"c\").all().compute()\n\n c = dd.read_parquet(\n tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\"), (\"x\", \">\", 6)]\n )\n assert c.npartitions <= 1\n assert not len(c)\n assert_eq(c, c)\n\n d = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n filters=[\n # Select two overlapping ranges\n [(\"x\", \">\", 1), (\"x\", \"<\", 6)],\n [(\"x\", \">\", 3), (\"x\", \"<\", 8)],\n ],\n )\n assert d.npartitions == 3\n assert ((d.x > 1) & (d.x < 8)).all().compute()\n\n e = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \"in\", (0, 9))])\n assert e.npartitions == 2\n assert ((e.x < 2) | (e.x > 7)).all().compute()\n\n\n@write_read_engines()\ndef test_filters_v0(tmpdir, write_engine, read_engine):\n if write_engine == \"fastparquet\" or read_engine == \"fastparquet\":\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n\n # Recent versions of pyarrow support full row-wise filtering\n # (fastparquet and older pyarrow versions do not)\n pyarrow_row_filtering = (\n read_engine == \"pyarrow-dataset\" and pa.__version__ >= LooseVersion(\"1.0.0\")\n )\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"at\": [\"ab\", \"aa\", \"ba\", \"da\", \"bb\"]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n # Ok with 1 partition and filters\n ddf.repartition(npartitions=1, force=True).to_parquet(\n fn, write_index=False, engine=write_engine\n )\n ddf2 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n if pyarrow_row_filtering:\n assert_eq(ddf2, ddf[ddf[\"at\"] == \"aa\"], check_index=False)\n else:\n assert_eq(ddf2, ddf)\n\n # with >1 partition and no filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine).compute()\n assert_eq(ddf2, ddf)\n\n # with >1 partition and filters using base fastparquet\n if read_engine == \"fastparquet\":\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"==\", \"aa\")])\n assert len(df2) > 0\n\n # with >1 partition and filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(\n fn, 
engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n assert len(ddf2) > 0\n\n\ndef test_filtering_pyarrow_dataset(tmpdir, engine):\n pytest.importorskip(\"pyarrow\", minversion=\"1.0.0\")\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"aa\": range(100), \"bb\": [\"cat\", \"dog\"] * 50})\n ddf = dd.from_pandas(df, npartitions=10)\n ddf.to_parquet(fn, write_index=False, engine=engine)\n\n # Filtered read\n aa_lim = 40\n bb_val = \"dog\"\n filters = [[(\"aa\", \"<\", aa_lim), (\"bb\", \"==\", bb_val)]]\n ddf2 = dd.read_parquet(fn, index=False, engine=\"pyarrow-dataset\", filters=filters)\n\n # Check that partitions are filetered for \"aa\" filter\n nonempty = 0\n for part in ddf[ddf[\"aa\"] < aa_lim].partitions:\n nonempty += int(len(part.compute()) > 0)\n assert ddf2.npartitions == nonempty\n\n # Check that rows are filtered for \"aa\" and \"bb\" filters\n df = df[df[\"aa\"] < aa_lim]\n df = df[df[\"bb\"] == bb_val]\n assert_eq(df, ddf2.compute(), check_index=False)\n\n\ndef test_fiters_file_list(tmpdir, engine):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf.to_parquet(str(tmpdir), engine=engine)\n fils = str(tmpdir.join(\"*.parquet\"))\n ddf_out = dd.read_parquet(\n fils, gather_statistics=True, engine=engine, filters=[(\"x\", \">\", 3)]\n )\n\n assert ddf_out.npartitions == 3\n assert_eq(df[df[\"x\"] > 3], ddf_out.compute(), check_index=False)\n\n # Check that first parition gets filtered for single-path input\n ddf2 = dd.read_parquet(\n str(tmpdir.join(\"part.0.parquet\")),\n gather_statistics=True,\n engine=engine,\n filters=[(\"x\", \">\", 3)],\n )\n assert len(ddf2) == 0\n\n\ndef test_pyarrow_filter_divisions(tmpdir):\n pytest.importorskip(\"pyarrow\")\n\n # Write simple dataset with an index that will only\n # have a sorted index if certain row-groups are filtered out.\n # In this case, we filter \"a\" <= 3 to get a sorted\n # index. 
Otherwise, \"a\" is NOT monotonically increasing.\n df = pd.DataFrame({\"a\": [0, 1, 10, 12, 2, 3, 8, 9], \"b\": range(8)}).set_index(\"a\")\n df.iloc[:4].to_parquet(\n str(tmpdir.join(\"file.0.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n df.iloc[4:].to_parquet(\n str(tmpdir.join(\"file.1.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n\n if pa.__version__ >= LooseVersion(\"1.0.0\"):\n # Only works for ArrowDatasetEngine.\n # Legacy code will not apply filters on individual row-groups\n # when `split_row_groups=False`.\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=False,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)\n\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=True,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)\n\n\ndef test_divisions_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n size = 100\n categoricals = []\n for value in [\"a\", \"b\", \"c\", \"d\"]:\n categoricals += [value] * int(size / 4)\n df = pd.DataFrame(\n {\n \"a\": categoricals,\n \"b\": np.random.random(size=size),\n \"c\": np.random.randint(1, 5, size=size),\n }\n )\n d = dd.from_pandas(df, npartitions=4)\n # save it\n d.to_parquet(tmpdir, write_index=True, partition_on=[\"a\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"a\", \"==\", \"b\")])\n # test it\n expected_divisions = (25, 49)\n assert out.divisions == expected_divisions\n\n\ndef test_divisions_are_known_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n df = pd.DataFrame(\n {\n \"unique\": [0, 0, 1, 1, 2, 2, 3, 3],\n \"id\": [\"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\"],\n },\n index=[0, 0, 1, 1, 2, 2, 3, 3],\n )\n d = dd.from_pandas(df, npartitions=2)\n # save it\n d.to_parquet(tmpdir, partition_on=[\"id\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"id\", \"==\", \"id1\")])\n # test it\n assert out.known_divisions\n expected_divisions = (0, 2, 3)\n assert out.divisions == expected_divisions\n\n\[email protected](reason=\"No longer accept ParquetFile objects\")\ndef test_read_from_fastparquet_parquetfile(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(fn, partition_on=[\"a\"], engine=\"fastparquet\")\n\n pq_f = fastparquet.ParquetFile(fn)\n\n # OK with no filters\n out = dd.read_parquet(pq_f).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n # OK with filters\n out = dd.read_parquet(pq_f, filters=[(\"a\", \"==\", \"B\")]).compute()\n assert set(df.b[df.a == \"B\"]) == set(out.b)\n\n # Engine should not be set to 'pyarrow'\n with pytest.raises(AssertionError):\n out = dd.read_parquet(pq_f, engine=\"pyarrow\")\n\n\[email protected](\"scheduler\", [\"threads\", \"processes\"])\ndef test_to_parquet_lazy(tmpdir, scheduler, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1.0, 2.0, 3.0, 4.0]})\n df.index.name = \"index\"\n ddf = 
dd.from_pandas(df, npartitions=2)\n value = ddf.to_parquet(tmpdir, compute=False, engine=engine)\n\n assert hasattr(value, \"dask\")\n value.compute(scheduler=scheduler)\n assert os.path.exists(tmpdir)\n\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf, ddf2)\n\n\ndef test_timestamp96(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"now\"]}, dtype=\"M8[ns]\")\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, write_index=False, times=\"int96\")\n pf = fastparquet.ParquetFile(fn)\n assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96\n out = dd.read_parquet(fn, index=False).compute()\n assert_eq(out, df)\n\n\ndef test_drill_scheme(tmpdir):\n check_fastparquet()\n fn = str(tmpdir)\n N = 5\n df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n files = []\n for d in [\"test_data1\", \"test_data2\"]:\n dn = os.path.join(fn, d)\n if not os.path.exists(dn):\n os.mkdir(dn)\n files.append(os.path.join(dn, \"data1.parq\"))\n\n fastparquet.write(files[0], df1)\n fastparquet.write(files[1], df2)\n\n df = dd.read_parquet(files)\n assert \"dir0\" in df.columns\n out = df.compute()\n assert \"dir0\" in out\n assert (np.unique(out.dir0) == [\"test_data1\", \"test_data2\"]).all()\n\n\ndef test_parquet_select_cats(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"categories\": pd.Series(\n np.random.choice([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], size=100),\n dtype=\"category\",\n ),\n \"ints\": pd.Series(list(range(0, 100)), dtype=\"int\"),\n \"floats\": pd.Series(list(range(0, 100)), dtype=\"float\"),\n }\n )\n\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, engine=engine)\n rddf = dd.read_parquet(fn, columns=[\"ints\"], engine=engine)\n assert list(rddf.columns) == [\"ints\"]\n rddf = dd.read_parquet(fn, engine=engine)\n assert list(rddf.columns) == list(df)\n\n\ndef test_columns_name(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet.__version__ <= LooseVersion(\"0.3.1\"):\n pytest.skip(\"Fastparquet does not write column_indexes up to 0.3.1\")\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index([\"a\", \"b\"], name=\"idx\"))\n df.columns.name = \"cols\"\n ddf = dd.from_pandas(df, 2)\n\n ddf.to_parquet(tmp_path, engine=engine)\n result = dd.read_parquet(tmp_path, engine=engine, index=[\"idx\"])\n assert_eq(result, df)\n\n\ndef check_compression(engine, filename, compression):\n if engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(filename)\n md = pf.fmd.row_groups[0].columns[0].meta_data\n if compression is None:\n assert md.total_compressed_size == md.total_uncompressed_size\n else:\n assert md.total_compressed_size != md.total_uncompressed_size\n else:\n metadata = pa.parquet.ParquetDataset(filename).metadata\n names = metadata.schema.names\n for i in range(metadata.num_row_groups):\n row_group = metadata.row_group(i)\n for j in range(len(names)):\n column = row_group.column(j)\n if compression is None:\n assert (\n column.total_compressed_size == column.total_uncompressed_size\n )\n else:\n compress_expect = compression\n if compression == \"default\":\n compress_expect = \"snappy\"\n assert compress_expect.lower() == column.compression.lower()\n assert (\n column.total_compressed_size != column.total_uncompressed_size\n )\n\n\[email protected](\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef 
test_writing_parquet_with_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine)\n out = dd.read_parquet(fn, engine=engine)\n assert_eq(out, ddf)\n check_compression(engine, fn, compression)\n\n\[email protected](\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=[\"x\"])\n check_compression(engine, fn, compression)\n\n\[email protected](\n params=[\n # fastparquet 0.1.3\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.7.1\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.8.0\n {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n },\n # TODO: fastparquet update\n ]\n)\ndef pandas_metadata(request):\n return request.param\n\n\ndef test_parse_pandas_metadata(pandas_metadata):\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(\n pandas_metadata\n )\n assert index_names == [\"idx\"]\n assert column_names == [\"A\"]\n assert column_index_names == [None]\n\n # for new pyarrow\n if pandas_metadata[\"index_columns\"] == [\"__index_level_0__\"]:\n assert mapping == {\"__index_level_0__\": \"idx\", \"A\": \"A\"}\n else:\n assert mapping == {\"idx\": \"idx\", \"A\": \"A\"}\n\n assert isinstance(mapping, dict)\n\n\ndef test_parse_pandas_metadata_null_index():\n # pyarrow 0.7.1 None for index\n e_index_names = [None]\n e_column_names = [\"x\"]\n e_mapping = {\"__index_level_0__\": None, \"x\": \"x\"}\n e_column_index_names = [None]\n\n md = {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"__index_level_0__\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, 
column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n # pyarrow 0.8.0 None for index\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"x\",\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": None,\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n\ndef test_read_no_metadata(tmpdir, engine):\n # use pyarrow.parquet to create a parquet file without\n # pandas metadata\n check_pyarrow()\n tmp = str(tmpdir) + \"table.parq\"\n\n table = pa.Table.from_arrays(\n [pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=[\"A\", \"B\"]\n )\n pq.write_table(table, tmp)\n result = dd.read_parquet(tmp, engine=engine)\n expected = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n assert_eq(result, expected)\n\n\ndef test_parse_pandas_metadata_duplicate_index_columns():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]\n\n\ndef test_parse_pandas_metadata_column_with_index_name():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]\n\n\ndef test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n 
path2 = os.path.join(fn, \"partitioned\")\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n engine_kwargs = {\n \"pyarrow-dataset\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n engine_kwargs[\"pyarrow-legacy\"] = engine_kwargs[\"pyarrow-dataset\"]\n\n ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])\n out = dd.read_parquet(path1, engine=engine)\n assert_eq(out, ddf, check_index=(engine != \"fastparquet\"))\n\n # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets\n with dask.config.set(scheduler=\"sync\"):\n ddf.to_parquet(\n path2, engine=engine, partition_on=[\"a\"], **engine_kwargs[engine]\n )\n out = dd.read_parquet(path2, engine=engine).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n\ndef test_writing_parquet_with_unknown_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n\n with pytest.raises(TypeError):\n ddf.to_parquet(fn, engine=engine, unknown_key=\"unknown_value\")\n\n\ndef test_to_parquet_with_get(tmpdir):\n check_engine()\n\n from dask.multiprocessing import get as mp_get\n\n tmpdir = str(tmpdir)\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(tmpdir, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n\n result = dd.read_parquet(os.path.join(tmpdir, \"*\"))\n assert_eq(result, df, check_index=False)\n\n\ndef test_select_partitioned_column(tmpdir, engine):\n pytest.importorskip(\"snappy\")\n if engine.startswith(\"pyarrow\"):\n import pyarrow as pa\n\n if pa.__version__ < LooseVersion(\"0.9.0\"):\n pytest.skip(\"pyarrow<0.9.0 did not support this\")\n\n fn = str(tmpdir)\n size = 20\n d = {\n \"signal1\": np.random.normal(0, 0.3, size=size).cumsum() + 50,\n \"fake_categorical1\": np.random.choice([\"A\", \"B\", \"C\"], size=size),\n \"fake_categorical2\": np.random.choice([\"D\", \"E\", \"F\"], size=size),\n }\n df = dd.from_pandas(pd.DataFrame(d), 2)\n df.to_parquet(\n fn,\n compression=\"snappy\",\n write_index=False,\n engine=engine,\n partition_on=[\"fake_categorical1\", \"fake_categorical2\"],\n )\n\n df_partitioned = dd.read_parquet(fn, engine=engine)\n df_partitioned[df_partitioned.fake_categorical1 == \"A\"].compute()\n\n\ndef test_with_tz(tmpdir, engine):\n if engine.startswith(\"pyarrow\") and pa.__version__ < LooseVersion(\"0.11.0\"):\n pytest.skip(\"pyarrow<0.11.0 did not support this\")\n if engine == \"fastparquet\" and fastparquet.__version__ < LooseVersion(\"0.3.0\"):\n pytest.skip(\"fastparquet<0.3.0 did not support this\")\n\n with warnings.catch_warnings():\n if engine == \"fastparquet\":\n # fastparquet-442\n warnings.simplefilter(\"ignore\", FutureWarning) # pandas 0.25\n fn = str(tmpdir)\n df = pd.DataFrame([[0]], columns=[\"a\"], dtype=\"datetime64[ns, UTC]\")\n df = dd.from_pandas(df, 1)\n df.to_parquet(fn, engine=engine)\n df2 = dd.read_parquet(fn, engine=engine)\n assert_eq(df, df2, check_divisions=False, check_index=False)\n\n\ndef test_arrow_partitioning(tmpdir):\n # Issue #3518\n 
check_pyarrow()\n path = str(tmpdir)\n data = {\n \"p\": np.repeat(np.arange(3), 2).astype(np.int8),\n \"b\": np.repeat(-1, 6).astype(np.int16),\n \"c\": np.repeat(-2, 6).astype(np.float32),\n \"d\": np.repeat(-3, 6).astype(np.float64),\n }\n pdf = pd.DataFrame(data)\n ddf = dd.from_pandas(pdf, npartitions=2)\n ddf.to_parquet(path, engine=\"pyarrow\", write_index=False, partition_on=\"p\")\n\n ddf = dd.read_parquet(path, index=False, engine=\"pyarrow\")\n\n ddf.astype({\"b\": np.float32}).compute()\n\n\ndef test_sorted_warnings(tmpdir, engine):\n\n if engine.startswith(\"pyarrow\"):\n pytest.skip(\n \"ArrowEngine will only collect statistics for \"\n \"known index columns and/or filtered columns.\"\n )\n\n tmpdir = str(tmpdir)\n df = dd.from_pandas(\n pd.DataFrame({\"cola\": range(10), \"colb\": range(10)}), npartitions=2\n )\n df.to_parquet(tmpdir, engine=engine, write_index=False)\n with pytest.warns(RuntimeWarning) as record:\n out = dd.read_parquet(tmpdir, engine=engine)\n assert \"['cola', 'colb']\" in str(record[-1].message)\n warnings = len(record)\n assert out.columns.tolist() == [\"cola\", \"colb\"]\n with pytest.warns(None) as record:\n dd.read_parquet(tmpdir, engine=engine, index=False)\n assert len(record) < warnings # still may have some arrow warnings\n\n\ndef test_informative_error_messages():\n with pytest.raises(ValueError) as info:\n dd.read_parquet(\"foo\", engine=\"foo\")\n\n assert \"foo\" in str(info.value)\n assert \"arrow\" in str(info.value)\n assert \"fastparquet\" in str(info.value)\n\n\ndef test_append_cat_fp(tmpdir, engine):\n path = str(tmpdir)\n # https://github.com/dask/dask/issues/4120\n df = pd.DataFrame({\"x\": [\"a\", \"a\", \"b\", \"a\", \"b\"]})\n df[\"x\"] = df[\"x\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=1)\n\n dd.to_parquet(ddf, path, engine=engine)\n dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)\n\n d = dd.read_parquet(path, engine=engine).compute()\n assert d[\"x\"].tolist() == [\"a\", \"a\", \"b\", \"a\", \"b\"] * 2\n\n\[email protected](\n \"df\",\n [\n pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]}),\n pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}),\n pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}),\n pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])})),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])})),\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us\n pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"),\n # pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"),\n pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]),\n pd.DataFrame(\n {\"x\": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name=\"foo\")\n ),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]),\n pd.DataFrame({\"0\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [3, 2, None]}),\n pd.DataFrame({\"-\": [3.0, 2.0, None]}),\n pd.DataFrame({\".\": [3.0, 2.0, None]}),\n pd.DataFrame({\" \": [3.0, 2.0, None]}),\n ],\n)\ndef test_roundtrip_arrow(tmpdir, df):\n check_pyarrow()\n # Index will be given a name when preserved as index\n 
tmp_path = str(tmpdir)\n if not df.index.name:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp_path, engine=\"pyarrow\", write_index=True)\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", gather_statistics=True)\n assert_eq(ddf, ddf2)\n\n\ndef test_datasets_timeseries(tmpdir, engine):\n tmp_path = str(tmpdir)\n df = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-10\", freq=\"1d\"\n ).persist()\n df.to_parquet(tmp_path, engine=engine)\n\n df2 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, df2)\n\n\ndef test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n path = pathlib.Path(str(tmpdir))\n ddf.to_parquet(path, engine=engine)\n ddf2 = dd.read_parquet(path, engine=engine)\n assert_eq(ddf, ddf2)\n\n\ndef test_pyarrow_metadata_nthreads(tmpdir):\n check_pyarrow()\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(tmp_path, engine=\"pyarrow\")\n ops = {\"dataset\": {\"metadata_nthreads\": 2}}\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", **ops)\n assert_eq(ddf, ddf2)\n\n\ndef test_categories_large(tmpdir, engine):\n # Issue #5112\n check_fastparquet()\n fn = str(tmpdir.join(\"parquet_int16.parq\"))\n numbers = np.random.randint(0, 800000, size=1000000)\n df = pd.DataFrame(numbers.T, columns=[\"name\"])\n df.name = df.name.astype(\"category\")\n\n df.to_parquet(fn, engine=\"fastparquet\", compression=\"uncompressed\")\n ddf = dd.read_parquet(fn, engine=engine, categories={\"name\": 80000})\n\n assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))\n\n\n@write_read_engines()\ndef test_read_glob_no_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"), engine=read_engine, gather_statistics=False\n )\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@write_read_engines()\ndef test_read_glob_yes_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n paths = glob.glob(os.path.join(tmp_path, \"*.parquet\"))\n paths.append(os.path.join(tmp_path, \"_metadata\"))\n ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\[email protected](\"statistics\", [True, False, None])\[email protected](\"remove_common\", [True, False])\n@write_read_engines()\ndef test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n if remove_common and os.path.exists(os.path.join(tmp_path, \"_common_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_common_metadata\"))\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\[email protected](\"schema\", [\"infer\", None])\ndef test_timeseries_nulls_in_schema(tmpdir, engine, schema):\n\n if (\n schema == \"infer\"\n and engine.startswith(\"pyarrow\")\n and pa.__version__ < LooseVersion(\"0.15.0\")\n ):\n 
pytest.skip(\"PyArrow>=0.15 Required.\")\n\n # GH#5608: relative path failing _metadata/_common_metadata detection.\n tmp_path = str(tmpdir.mkdir(\"files\"))\n tmp_path = os.path.join(tmp_path, \"../\", \"files\")\n\n ddf2 = (\n dask.datasets.timeseries(start=\"2000-01-01\", end=\"2000-01-03\", freq=\"1h\")\n .reset_index()\n .map_partitions(lambda x: x.loc[:5])\n )\n ddf2 = ddf2.set_index(\"x\").reset_index().persist()\n ddf2.name = ddf2.name.where(ddf2.timestamp == \"2000-01-01\", None)\n\n # Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write\n dataset = {\"validate_schema\": False}\n engine_use = engine\n if schema != \"infer\" and engine.startswith(\"pyarrow\"):\n engine_use = \"pyarrow-legacy\"\n ddf2.to_parquet(\n tmp_path, engine=engine_use, write_metadata_file=False, schema=schema\n )\n ddf_read = dd.read_parquet(tmp_path, engine=engine_use, dataset=dataset)\n\n assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)\n\n # Can force schema validation on each partition in pyarrow\n if engine.startswith(\"pyarrow\") and schema is None:\n dataset = {\"validate_schema\": True}\n engine_use = engine\n if schema != \"infer\":\n engine_use = \"pyarrow-legacy\"\n # The schema mismatch should raise an error if the\n # dataset was written with `schema=None` (no inference)\n with pytest.raises(ValueError):\n ddf_read = dd.read_parquet(tmp_path, dataset=dataset, engine=engine_use)\n\n\[email protected](\"numerical\", [True, False])\[email protected](\n \"timestamp\", [\"2000-01-01\", \"2000-01-02\", \"2000-01-03\", \"2000-01-04\"]\n)\ndef test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):\n check_pyarrow()\n tmp_path = str(tmpdir)\n ddf2 = dd.from_pandas(\n pd.DataFrame(\n {\n \"timestamp\": [\n pd.Timestamp(\"2000-01-01\"),\n pd.Timestamp(\"2000-01-02\"),\n pd.Timestamp(\"2000-01-03\"),\n pd.Timestamp(\"2000-01-04\"),\n ],\n \"id\": np.arange(4, dtype=\"float64\"),\n \"name\": [\"cat\", \"dog\", \"bird\", \"cow\"],\n }\n ),\n npartitions=2,\n ).persist()\n if numerical:\n ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)\n ddf2.id = ddf2.id.astype(\"float64\")\n else:\n ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)\n\n # There should be no schema error if you specify a schema on write\n schema = pa.schema(\n [(\"timestamp\", pa.timestamp(\"ns\")), (\"id\", pa.float64()), (\"name\", pa.string())]\n )\n ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine=\"pyarrow\")\n assert_eq(\n dd.read_parquet(\n tmp_path, dataset={\"validate_schema\": True}, index=False, engine=\"pyarrow\"\n ),\n ddf2,\n check_divisions=False,\n check_index=False,\n )\n\n\ndef test_read_inconsistent_schema_pyarrow(tmpdir):\n check_pyarrow()\n\n # Note: This is a proxy test for a cudf-related issue fix\n # (see cudf#5062 github issue). 
The cause of that issue is\n # schema inconsistencies that do not actually correspond to\n # different types, but whether or not the file/column contains\n # null values.\n\n df1 = pd.DataFrame({\"id\": [0, 1], \"val\": [10, 20]})\n df2 = pd.DataFrame({\"id\": [2, 3], \"val\": [30, 40]})\n\n desired_type = \"int64\"\n other_type = \"int32\"\n df1.val = df1.val.astype(desired_type)\n df2.val = df2.val.astype(other_type)\n\n df_expect = pd.concat([df1, df2], ignore_index=True)\n df_expect[\"val\"] = df_expect.val.astype(desired_type)\n\n df1.to_parquet(os.path.join(tmpdir, \"0.parquet\"))\n df2.to_parquet(os.path.join(tmpdir, \"1.parquet\"))\n\n # Read Directory\n check = dd.read_parquet(str(tmpdir), dataset={\"validate_schema\": False})\n assert_eq(check.compute(), df_expect, check_index=False)\n\n # Read List\n check = dd.read_parquet(\n os.path.join(tmpdir, \"*.parquet\"), dataset={\"validate_schema\": False}\n )\n assert_eq(check.compute(), df_expect, check_index=False)\n\n\ndef test_graph_size_pyarrow(tmpdir, engine):\n import pickle\n\n fn = str(tmpdir)\n\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-02\", freq=\"60S\", partition_freq=\"1H\"\n )\n\n ddf1.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n\n # pyarrow.dataset requires fragments to be passed in dict.\n # This requires slightly more space than \"legacy\"\n assert len(pickle.dumps(ddf2.__dask_graph__())) < 32000\n\n\[email protected](\"preserve_index\", [True, False])\[email protected](\"index\", [None, np.random.permutation(2000)])\ndef test_getitem_optimization(tmpdir, engine, preserve_index, index):\n tmp_path_rd = str(tmpdir.mkdir(\"read\"))\n tmp_path_wt = str(tmpdir.mkdir(\"write\"))\n df = pd.DataFrame(\n {\"A\": [1, 2] * 1000, \"B\": [3, 4] * 1000, \"C\": [5, 6] * 1000}, index=index\n )\n df.index.name = \"my_index\"\n ddf = dd.from_pandas(df, 2, sort=False)\n\n ddf.to_parquet(tmp_path_rd, engine=engine, write_index=preserve_index)\n ddf = dd.read_parquet(tmp_path_rd, engine=engine)[\"B\"]\n\n # Write ddf back to disk to check that the round trip\n # preserves the getitem optimization\n out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)\n dsk = optimize_read_parquet_getitem(out.dask, keys=[out.key])\n\n read = [key for key in dsk.layers if key.startswith(\"read-parquet\")][0]\n subgraph = dsk.layers[read]\n assert isinstance(subgraph, ParquetSubgraph)\n assert subgraph.columns == [\"B\"]\n\n assert_eq(ddf.compute(optimize_graph=False), ddf.compute())\n\n\ndef test_getitem_optimization_empty(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n df2 = dd.read_parquet(fn, columns=[], engine=engine)\n dsk = optimize_read_parquet_getitem(df2.dask, keys=[df2._name])\n\n subgraph = list(dsk.layers.values())[0]\n assert isinstance(subgraph, ParquetSubgraph)\n assert subgraph.columns == []\n\n\ndef test_getitem_optimization_multi(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n a = dd.read_parquet(fn, engine=engine)[\"B\"]\n b = dd.read_parquet(fn, engine=engine)[[\"C\"]]\n c = dd.read_parquet(fn, engine=engine)[[\"C\", \"A\"]]\n\n a1, a2, a3 = dask.compute(a, b, c)\n b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)\n\n 
assert_eq(a1, b1)\n assert_eq(a2, b2)\n assert_eq(a3, b3)\n\n\ndef test_subgraph_getitem():\n meta = pd.DataFrame(columns=[\"a\"])\n subgraph = ParquetSubgraph(\"name\", \"pyarrow\", \"fs\", meta, [], [], [0, 1, 2], {})\n\n with pytest.raises(KeyError):\n subgraph[\"foo\"]\n\n with pytest.raises(KeyError):\n subgraph[(\"name\", -1)]\n\n with pytest.raises(KeyError):\n subgraph[(\"name\", 3)]\n\n\ndef test_blockwise_parquet_annotations(tmpdir):\n check_engine()\n\n df = pd.DataFrame({\"a\": np.arange(40, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=2)\n expect.to_parquet(str(tmpdir))\n\n with dask.annotate(foo=\"bar\"):\n ddf = dd.read_parquet(str(tmpdir))\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n assert isinstance(list(layers.values())[0], ParquetSubgraph)\n assert list(layers.values())[0].annotations == {\"foo\": \"bar\"}\n\n\ndef test_split_row_groups_pyarrow(tmpdir):\n \"\"\"Test split_row_groups read_parquet kwarg\"\"\"\n check_pyarrow()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=100\n )\n\n ddf3 = dd.read_parquet(tmp, engine=\"pyarrow\", split_row_groups=True, chunksize=1)\n assert ddf3.npartitions == 4\n\n ddf3 = dd.read_parquet(\n tmp, engine=\"pyarrow\", gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 2\n\n dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf3 = dd.read_parquet(\n tmp,\n engine=\"pyarrow\",\n gather_statistics=True,\n split_row_groups=True,\n chunksize=1,\n )\n assert ddf3.npartitions == 12\n\n ddf3 = dd.read_parquet(\n tmp, engine=\"pyarrow\", gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 4\n\n\[email protected](\"split_row_groups\", [1, 12])\[email protected](\"gather_statistics\", [True, False])\ndef test_split_row_groups_int_pyarrow(tmpdir, split_row_groups, gather_statistics):\n\n check_pyarrow()\n tmp = str(tmpdir)\n engine = \"pyarrow\"\n row_group_size = 10\n npartitions = 4\n half_size = 400\n df = pd.DataFrame(\n {\n \"i32\": np.arange(2 * half_size, dtype=np.int32),\n \"f\": np.arange(2 * half_size, dtype=np.float64),\n }\n )\n half = len(df) // 2\n\n dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(\n tmp, engine=engine, row_group_size=row_group_size\n )\n dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(\n tmp, append=True, engine=engine, row_group_size=row_group_size\n )\n\n ddf2 = dd.read_parquet(\n tmp,\n engine=engine,\n split_row_groups=split_row_groups,\n gather_statistics=gather_statistics,\n )\n expected_rg_cout = int(half_size / row_group_size)\n assert ddf2.npartitions == 2 * math.ceil(expected_rg_cout / split_row_groups)\n\n\ndef test_split_row_groups_filter_pyarrow(tmpdir):\n check_pyarrow()\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n search_val = 600\n filters = [(\"f\", \"==\", search_val)]\n\n dd.from_pandas(df, npartitions=4).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf2 = dd.read_parquet(tmp, engine=\"pyarrow\")\n ddf3 = dd.read_parquet(\n tmp,\n engine=\"pyarrow\",\n 
gather_statistics=True,\n split_row_groups=True,\n filters=filters,\n )\n\n ddf2.compute(scheduler=\"sync\")\n\n assert search_val in ddf3[\"i32\"]\n assert_eq(\n ddf2[ddf2[\"i32\"] == search_val].compute(),\n ddf3[ddf3[\"i32\"] == search_val].compute(),\n )\n\n\ndef test_optimize_getitem_and_nonblockwise(tmpdir):\n check_engine()\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2[[\"a\", \"b\"]].rolling(3).max().compute()\n\n\ndef test_optimize_and_not(tmpdir):\n check_engine()\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2a = df2[\"a\"].groupby(df2[\"c\"]).first().to_delayed()\n df2b = df2[\"b\"].groupby(df2[\"c\"]).first().to_delayed()\n df2c = df2[[\"a\", \"b\"]].rolling(2).max().to_delayed()\n df2d = df2.rolling(2).max().to_delayed()\n (result,) = dask.compute(df2a + df2b + df2c + df2d)\n\n expected = [\n dask.compute(df2a)[0][0],\n dask.compute(df2b)[0][0],\n dask.compute(df2c)[0][0],\n dask.compute(df2d)[0][0],\n ]\n for a, b in zip(result, expected):\n assert_eq(a, b)\n\n\[email protected](\"metadata\", [True, False])\[email protected](\"chunksize\", [None, 1024, 4096, \"1MiB\"])\ndef test_chunksize(tmpdir, chunksize, engine, metadata):\n check_pyarrow() # Need pyarrow for write phase in this test\n\n nparts = 2\n df_size = 100\n row_group_size = 5\n row_group_byte_size = 451 # Empirically measured\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n \"index\": np.arange(0, df_size),\n }\n ).set_index(\"index\")\n\n ddf1 = dd.from_pandas(df, npartitions=nparts)\n ddf1.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n row_group_size=row_group_size,\n write_metadata_file=metadata,\n )\n\n if metadata:\n path = str(tmpdir)\n else:\n dirname = str(tmpdir)\n files = os.listdir(dirname)\n assert \"_metadata\" not in files\n path = os.path.join(dirname, \"*.parquet\")\n\n ddf2 = dd.read_parquet(\n path,\n engine=engine,\n chunksize=chunksize,\n split_row_groups=True,\n gather_statistics=True,\n index=\"index\",\n )\n\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n num_row_groups = df_size // row_group_size\n if not chunksize:\n assert ddf2.npartitions == num_row_groups\n else:\n # Check that we are really aggregating\n df_byte_size = row_group_byte_size * num_row_groups\n expected = df_byte_size // parse_bytes(chunksize)\n remainder = (df_byte_size % parse_bytes(chunksize)) > 0\n expected += int(remainder) * nparts\n assert ddf2.npartitions == max(nparts, expected)\n\n\n@write_read_engines()\ndef test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):\n path = str(tmpdir.join(\"test.parquet\"))\n pdf = df.copy()\n pdf.index.name = \"index\"\n pdf.to_parquet(\n path, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n ddf_read = dd.read_parquet(\n path,\n engine=read_engine,\n chunksize=\"10 kiB\",\n gather_statistics=True,\n split_row_groups=True,\n index=\"index\",\n )\n\n assert_eq(pdf, ddf_read)\n\n\ndef test_read_pandas_fastparquet_partitioned(tmpdir, engine):\n check_fastparquet()\n\n pdf = pd.DataFrame(\n [{\"str\": 
str(i), \"int\": i, \"group\": \"ABC\"[i % 3]} for i in range(6)]\n )\n path = str(tmpdir)\n pdf.to_parquet(path, partition_cols=[\"group\"], engine=\"fastparquet\")\n ddf_read = dd.read_parquet(path, engine=engine)\n\n assert len(ddf_read[\"group\"].compute()) == 6\n assert len(ddf_read.compute().group) == 6\n\n\ndef test_read_parquet_getitem_skip_when_getting_getitem(tmpdir, engine):\n # https://github.com/dask/dask/issues/5893\n pdf = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6], \"B\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]})\n path = os.path.join(str(tmpdir), \"data.parquet\")\n pd_engine = \"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\"\n pdf.to_parquet(path, engine=pd_engine)\n\n ddf = dd.read_parquet(path, engine=engine)\n a, b = dask.optimize(ddf[\"A\"], ddf)\n\n\[email protected](\"gather_statistics\", [None, True])\n@write_read_engines()\ndef test_filter_nonpartition_columns(\n tmpdir, write_engine, read_engine, gather_statistics\n):\n tmpdir = str(tmpdir)\n df_write = pd.DataFrame(\n {\n \"id\": [1, 2, 3, 4] * 4,\n \"time\": np.arange(16),\n \"random\": np.random.choice([\"cat\", \"dog\"], size=16),\n }\n )\n ddf_write = dd.from_pandas(df_write, npartitions=4)\n ddf_write.to_parquet(\n tmpdir, write_index=False, partition_on=[\"id\"], engine=write_engine\n )\n ddf_read = dd.read_parquet(\n tmpdir,\n index=False,\n engine=read_engine,\n gather_statistics=gather_statistics,\n filters=[((\"time\", \"<\", 5))],\n )\n df_read = ddf_read.compute()\n assert len(df_read) == len(df_read[df_read[\"time\"] < 5])\n assert df_read[\"time\"].max() < 5\n\n\ndef test_pandas_metadata_nullable_pyarrow(tmpdir):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.16.0\") or pd.__version__ < LooseVersion(\n \"1.0.0\"\n ):\n pytest.skip(\"PyArrow>=0.16 and Pandas>=1.0.0 Required.\")\n tmpdir = str(tmpdir)\n\n ddf1 = dd.from_pandas(\n pd.DataFrame(\n {\n \"A\": pd.array([1, None, 2], dtype=\"Int64\"),\n \"B\": pd.array([\"dog\", \"cat\", None], dtype=\"str\"),\n }\n ),\n npartitions=1,\n )\n ddf1.to_parquet(tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert_eq(ddf1, ddf2, check_index=False)\n\n\ndef test_pandas_timestamp_overflow_pyarrow(tmpdir):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.17.0\"):\n pytest.skip(\"PyArrow>=0.17 Required.\")\n\n info = np.iinfo(np.dtype(\"int64\"))\n arr_numeric = np.linspace(\n start=info.min + 2, stop=info.max, num=1024, dtype=\"int64\"\n )\n arr_dates = arr_numeric.astype(\"datetime64[ms]\")\n\n table = pa.Table.from_arrays([pa.array(arr_dates)], names=[\"ts\"])\n pa.parquet.write_table(\n table, f\"{tmpdir}/file.parquet\", use_deprecated_int96_timestamps=False\n )\n\n # This will raise by default due to overflow\n with pytest.raises(pa.lib.ArrowInvalid) as e:\n dd.read_parquet(str(tmpdir), engine=\"pyarrow-legacy\").compute()\n assert \"out of bounds\" in str(e.value)\n\n from dask.dataframe.io.parquet.arrow import ArrowEngine\n\n class ArrowEngineWithTimestampClamp(ArrowEngine):\n @classmethod\n def clamp_arrow_datetimes(cls, arrow_table: pa.Table) -> pa.Table:\n \"\"\"Constrain datetimes to be valid for pandas\n\n Since pandas works in ns precision and arrow / parquet defaults to ms\n precision we need to clamp our datetimes to something reasonable\"\"\"\n\n new_columns = []\n for i, col in enumerate(arrow_table.columns):\n if pa.types.is_timestamp(col.type) and (\n col.type.unit in (\"s\", \"ms\", \"us\")\n ):\n multiplier = {\"s\": 1_0000_000_000, \"ms\": 1_000_000, \"us\": 
1_000}[\n col.type.unit\n ]\n\n original_type = col.type\n\n series: pd.Series = col.cast(pa.int64()).to_pandas(\n types_mapper={pa.int64(): pd.Int64Dtype}\n )\n info = np.iinfo(np.dtype(\"int64\"))\n # constrain data to be within valid ranges\n series.clip(\n lower=info.min // multiplier + 1,\n upper=info.max // multiplier,\n inplace=True,\n )\n new_array = pa.array(series, pa.int64())\n new_array = new_array.cast(original_type)\n new_columns.append(new_array)\n else:\n new_columns.append(col)\n\n return pa.Table.from_arrays(new_columns, names=arrow_table.column_names)\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n fixed_arrow_table = cls.clamp_arrow_datetimes(arrow_table)\n return super()._arrow_table_to_pandas(\n fixed_arrow_table, categories, **kwargs\n )\n\n # this should not fail, but instead produce timestamps that are in the valid range\n dd.read_parquet(str(tmpdir), engine=ArrowEngineWithTimestampClamp).compute()\n\n\n@write_read_engines_xfail\ndef test_partitioned_preserve_index(tmpdir, write_engine, read_engine):\n\n if write_engine.startswith(\"pyarrow\") and pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n tmp = str(tmpdir)\n size = 1_000\n npartitions = 4\n b = np.arange(npartitions).repeat(size // npartitions)\n data = pd.DataFrame(\n {\n \"myindex\": np.arange(size),\n \"A\": np.random.random(size=size),\n \"B\": pd.Categorical(b),\n }\n ).set_index(\"myindex\")\n data.index.name = None\n df1 = dd.from_pandas(data, npartitions=npartitions)\n df1.to_parquet(tmp, partition_on=\"B\", engine=write_engine)\n\n expect = data[data[\"B\"] == 1]\n got = dd.read_parquet(tmp, engine=read_engine, filters=[(\"B\", \"==\", 1)])\n assert_eq(expect, got)\n\n\ndef test_from_pandas_preserve_none_index(tmpdir, engine):\n\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"a\": [1, 2], \"b\": [4, 5], \"c\": [6, 7]}).set_index(\"c\")\n df.index.name = None\n df.to_parquet(\n fn,\n engine=\"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\",\n index=True,\n )\n\n expect = pd.read_parquet(fn)\n got = dd.read_parquet(fn, engine=engine)\n assert_eq(expect, got)\n\n\ndef test_multi_partition_none_index_false(tmpdir, engine):\n check_pyarrow()\n if pa.__version__ < LooseVersion(\"0.15.0\"):\n pytest.skip(\"PyArrow>=0.15 Required.\")\n\n # Write dataset without dast.to_parquet\n ddf1 = ddf.reset_index(drop=True)\n for i, part in enumerate(ddf1.partitions):\n path = tmpdir.join(f\"test.{i}.parquet\")\n part.compute().to_parquet(str(path), engine=\"pyarrow\")\n\n # Read back with index=False\n ddf2 = dd.read_parquet(str(tmpdir), index=False)\n assert_eq(ddf1, ddf2)\n\n\n@write_read_engines()\ndef test_from_pandas_preserve_none_rangeindex(tmpdir, write_engine, read_engine):\n # See GitHub Issue#6348\n fn = str(tmpdir.join(\"test.parquet\"))\n df0 = pd.DataFrame({\"t\": [1, 2, 3]}, index=pd.RangeIndex(start=1, stop=4))\n df0.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n df1 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df0, df1.compute())\n\n\ndef test_illegal_column_name(tmpdir, engine):\n # Make sure user is prevented from preserving a \"None\" index\n # name if there is already a column using the special `null_name`\n null_name = \"__null_dask_index__\"\n fn = 
str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"x\": [1, 2], null_name: [4, 5]}).set_index(\"x\")\n df.index.name = None\n ddf = dd.from_pandas(df, npartitions=2)\n\n # If we don't want to preserve the None index name, the\n # write should work, but the user should be warned\n with pytest.warns(UserWarning, match=null_name):\n ddf.to_parquet(fn, engine=engine, write_index=False)\n\n # If we do want to preserve the None index name, should\n # get a ValueError for having an illegal column name\n with pytest.raises(ValueError) as e:\n ddf.to_parquet(fn, engine=engine)\n assert null_name in str(e.value)\n\n\ndef test_divisions_with_null_partition(tmpdir, engine):\n df = pd.DataFrame({\"a\": [1, 2, None, None], \"b\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(str(tmpdir), engine=engine, write_index=False)\n\n ddf_read = dd.read_parquet(str(tmpdir), engine=engine, index=\"a\")\n assert ddf_read.divisions == (None, None, None)\n\n\ndef test_pyarrow_dataset_simple(tmpdir, engine):\n check_pyarrow()\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine)\n read_df = dd.read_parquet(fn, engine=\"pyarrow-legacy\")\n read_df.compute(scheduler=\"synchronous\")\n assert_eq(ddf, read_df)\n\n\[email protected](\"test_filter\", [True, False])\ndef test_pyarrow_dataset_partitioned(tmpdir, engine, test_filter):\n check_pyarrow()\n\n if pa.__version__ <= LooseVersion(\"0.17.1\"):\n # Using pyarrow.dataset API does not produce\n # Categorical type for partitioned columns.\n pytest.skip(\"PyArrow>0.17.1 Required.\")\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine, partition_on=\"b\")\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")] if test_filter else None,\n )\n\n if test_filter:\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df.compute())\n else:\n assert_eq(ddf, read_df)\n\n\[email protected](\"read_from_paths\", [True, False])\[email protected](\"test_filter_partitioned\", [True, False])\ndef test_pyarrow_dataset_read_from_paths(\n tmpdir, read_from_paths, test_filter_partitioned\n):\n check_pyarrow()\n\n if pa.__version__ <= LooseVersion(\"0.17.1\"):\n # Using pyarrow.dataset API does not produce\n # Categorical type for partitioned columns.\n pytest.skip(\"PyArrow>0.17.1 Required.\")\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n\n if test_filter_partitioned:\n ddf.to_parquet(fn, engine=\"pyarrow\", partition_on=\"b\")\n else:\n ddf.to_parquet(fn, engine=\"pyarrow\")\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")] if test_filter_partitioned else None,\n read_from_paths=read_from_paths,\n )\n\n if test_filter_partitioned:\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df.compute())\n else:\n assert_eq(ddf, read_df)\n\n\ndef test_parquet_pyarrow_write_empty_metadata(tmpdir):\n # https://github.com/dask/dask/issues/6600\n check_pyarrow()\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int\", \"int\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 
1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df = dd.from_delayed([df_a, df_b, df_c])\n\n try:\n df.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n except AttributeError:\n pytest.fail(\"Unexpected AttributeError\")\n\n # Check that metadata files where written\n files = os.listdir(tmpdir)\n assert \"_metadata\" in files\n assert \"_common_metadata\" in files\n\n # Check that the schema includes pandas_metadata\n schema_common = pq.ParquetFile(\n os.path.join(tmpdir, \"_common_metadata\")\n ).schema.to_arrow_schema()\n pandas_metadata = schema_common.pandas_metadata\n assert pandas_metadata\n assert pandas_metadata.get(\"index_columns\", False)\n\n\ndef test_parquet_pyarrow_write_empty_metadata_append(tmpdir):\n # https://github.com/dask/dask/issues/6600\n check_pyarrow()\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [2, 0, 2, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df1 = dd.from_delayed([df_a, df_b])\n df1.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int64\", \"int64\")\n )\n df_d = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [3, 3, 4, 4], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df2 = dd.from_delayed([df_c, df_d])\n df2.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=True,\n ignore_divisions=True,\n )\n\n\[email protected](\"partition_on\", [None, \"a\"])\n@write_read_engines()\ndef test_create_metadata_file(tmpdir, write_engine, read_engine, partition_on):\n\n check_pyarrow()\n tmpdir = str(tmpdir)\n\n # Write ddf without a _metadata file\n df1 = pd.DataFrame({\"b\": range(100), \"a\": [\"A\", \"B\", \"C\", \"D\"] * 25})\n df1.index.name = \"myindex\"\n ddf1 = dd.from_pandas(df1, npartitions=10)\n ddf1.to_parquet(\n tmpdir,\n write_metadata_file=False,\n partition_on=partition_on,\n engine=write_engine,\n )\n\n # Add global _metadata file\n if partition_on:\n fns = glob.glob(os.path.join(tmpdir, partition_on + \"=*/*.parquet\"))\n else:\n fns = glob.glob(os.path.join(tmpdir, \"*.parquet\"))\n dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n )\n\n # Check that we can now read the ddf\n # with the _metadata file present\n ddf2 = dd.read_parquet(\n tmpdir,\n gather_statistics=True,\n split_row_groups=False,\n engine=read_engine,\n index=\"myindex\", # python-3.6 CI\n )\n if partition_on:\n ddf1 = df1.sort_values(\"b\")\n ddf2 = ddf2.compute().sort_values(\"b\")\n ddf2.a = ddf2.a.astype(\"object\")\n assert_eq(ddf1, ddf2)\n\n # Check if we can avoid writing an actual file\n fmd = dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n out_dir=False, # Avoid writing file\n )\n\n # Check that the in-memory metadata is the same as\n # the metadata in the file.\n fmd_file = pq.ParquetFile(os.path.join(tmpdir, \"_metadata\")).metadata\n assert fmd.num_rows == fmd_file.num_rows\n assert fmd.num_columns == fmd_file.num_columns\n assert fmd.num_row_groups == fmd_file.num_row_groups\n\n\ndef test_read_write_overwrite_is_true(tmpdir, engine):\n # 
https://github.com/dask/dask/issues/6824\n\n # Create a Dask DataFrame if size (100, 10) with 5 partitions and write to local\n ddf = dd.from_pandas(\n pd.DataFrame(\n np.random.randint(low=0, high=100, size=(100, 10)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n ),\n npartitions=5,\n )\n ddf = ddf.reset_index(drop=True)\n dd.to_parquet(ddf, tmpdir, engine=engine, overwrite=True)\n\n # Keep the contents of the DataFrame constatn but change the # of partitions\n ddf2 = ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n dd.to_parquet(ddf2, tmpdir, engine=engine, overwrite=True)\n\n # Assert the # of files written are identical to the number of\n # Dask DataFrame partitions (we exclude _metadata and _common_metadata)\n files = os.listdir(tmpdir)\n files = [f for f in files if f not in [\"_common_metadata\", \"_metadata\"]]\n assert len(files) == ddf2.npartitions\n\n\ndef test_read_write_partition_on_overwrite_is_true(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n from pathlib import Path\n\n # Create a Dask DataFrame with 5 partitions and write to local, partitioning on the column A and column B\n df = pd.DataFrame(\n np.vstack(\n (\n np.full((50, 3), 0),\n np.full((50, 3), 1),\n np.full((20, 3), 2),\n )\n )\n )\n df.columns = [\"A\", \"B\", \"C\"]\n ddf = dd.from_pandas(df, npartitions=5)\n dd.to_parquet(ddf, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n\n # Get the total number of files and directories from the original write\n files_ = Path(tmpdir).rglob(\"*\")\n files = [f.as_posix() for f in files_]\n # Keep the contents of the DataFrame constant but change the # of partitions\n ddf2 = ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n # Get the total number of files and directories from the original write\n dd.to_parquet(ddf2, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n files2_ = Path(tmpdir).rglob(\"*\")\n files2 = [f.as_posix() for f in files2_]\n # After reducing the # of partitions and overwriting, we expect\n # there to be fewer total files than were originally written\n assert len(files2) < len(files)\n\n\ndef test_to_parquet_overwrite_raises(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n # Check that overwrite=True will raise an error if the\n # specified path is the current working directory\n df = pd.DataFrame({\"a\": range(12)})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, \"./\", engine=engine, overwrite=True)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, tmpdir, engine=engine, append=True, overwrite=True)\n\n\ndef test_dir_filter(tmpdir, engine):\n # github #6898\n df = pd.DataFrame.from_dict(\n {\n \"A\": {\n 0: 351.0,\n 1: 355.0,\n 2: 358.0,\n 3: 266.0,\n 4: 266.0,\n 5: 268.0,\n 6: np.nan,\n },\n \"B\": {\n 0: 2063.0,\n 1: 2051.0,\n 2: 1749.0,\n 3: 4281.0,\n 4: 3526.0,\n 5: 3462.0,\n 6: np.nan,\n },\n \"year\": {0: 2019, 1: 2019, 2: 2020, 3: 2020, 4: 2020, 5: 2020, 6: 2020},\n }\n )\n ddf = dask.dataframe.from_pandas(df, npartitions=1)\n ddf.to_parquet(tmpdir, partition_on=\"year\", engine=engine)\n dd.read_parquet(tmpdir, filters=[(\"year\", \"==\", 2020)], engine=engine)\n assert all\n\n\ndef test_roundtrip_decimal_dtype(tmpdir):\n # 
https://github.com/dask/dask/issues/6948\n check_pyarrow()\n tmpdir = str(tmpdir)\n\n data = [\n {\n \"ts\": pd.to_datetime(\"2021-01-01\", utc=\"Europe/Berlin\"),\n \"col1\": Decimal(\"123.00\"),\n }\n for i in range(23)\n ]\n ddf1 = dd.from_pandas(pd.DataFrame(data), npartitions=1)\n\n ddf1.to_parquet(path=tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert ddf1[\"col1\"].dtype == ddf2[\"col1\"].dtype\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n\ndef test_roundtrip_rename_columns(tmpdir, engine):\n # https://github.com/dask/dask/issues/7017\n\n path = os.path.join(str(tmpdir), \"test.parquet\")\n df1 = pd.DataFrame(columns=[\"a\", \"b\", \"c\"], data=np.random.uniform(size=(10, 3)))\n df1.to_parquet(path)\n\n # read it with dask and rename columns\n ddf2 = dd.read_parquet(path, engine=engine)\n ddf2.columns = [\"d\", \"e\", \"f\"]\n df1.columns = [\"d\", \"e\", \"f\"]\n\n assert_eq(df1, ddf2.compute())\n" ]
[ [ "pandas.to_datetime", "numpy.linspace", "numpy.isnat", "pandas.RangeIndex", "pandas.DataFrame", "numpy.dtype", "pandas.read_parquet", "numpy.random.randn", "numpy.random.randint", "numpy.unique", "numpy.arange", "pandas.Index", "numpy.full", "numpy.repeat", "pandas.concat", "numpy.random.choice", "pandas.Categorical", "pandas.array", "numpy.random.rand", "pandas.DataFrame.from_dict", "numpy.random.random", "numpy.array_equal", "numpy.datetime64", "numpy.random.normal", "numpy.random.permutation", "numpy.random.uniform", "pandas.Timestamp" ] ]
MichQiu/PreSumm
[ "b826ac7d23cd78da5a5293c2ac625b7bd7af1364" ]
[ "src/models/encoder.py" ]
[ "import math\n\nimport torch\nimport torch.nn as nn\n\nfrom models.neural import MultiHeadedAttention, PositionwiseFeedForward\n\n\nclass Classifier(nn.Module):\n def __init__(self, hidden_size):\n super(Classifier, self).__init__()\n self.linear1 = nn.Linear(hidden_size, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, mask_cls):\n h = self.linear1(x).squeeze(-1)\n sent_scores = self.sigmoid(h) * mask_cls.float() # predict from cls label\n return sent_scores\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, dropout, dim, max_len=5000):\n pe = torch.zeros(max_len, dim)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *\n -(math.log(10000.0) / dim)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n pe = pe.unsqueeze(0)\n super(PositionalEncoding, self).__init__()\n self.register_buffer('pe', pe)\n self.dropout = nn.Dropout(p=dropout)\n self.dim = dim\n\n def forward(self, emb, step=None):\n emb = emb * math.sqrt(self.dim)\n if (step):\n emb = emb + self.pe[:, step][:, None, :]\n\n else:\n emb = emb + self.pe[:, :emb.size(1)]\n emb = self.dropout(emb)\n return emb\n\n def get_emb(self, emb):\n return self.pe[:, :emb.size(1)]\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, heads, d_ff, dropout):\n super(TransformerEncoderLayer, self).__init__()\n\n self.self_attn = MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, iter, query, inputs, mask):\n if (iter != 0):\n input_norm = self.layer_norm(inputs)\n else:\n input_norm = inputs\n\n mask = mask.unsqueeze(1)\n context = self.self_attn(input_norm, input_norm, input_norm,\n mask=mask)\n out = self.dropout(context) + inputs\n return self.feed_forward(out) # [batch_size, seq_len, dim]\n\n\nclass ExtTransformerEncoder(nn.Module):\n def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0):\n super(ExtTransformerEncoder, self).__init__()\n self.d_model = d_model\n self.num_inter_layers = num_inter_layers\n self.pos_emb = PositionalEncoding(dropout, d_model)\n self.transformer_inter = nn.ModuleList(\n [TransformerEncoderLayer(d_model, heads, d_ff, dropout)\n for _ in range(num_inter_layers)])\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.wo = nn.Linear(d_model, 1, bias=True)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, top_vecs, mask):\n \"\"\" See :obj:`EncoderBase.forward()`\"\"\"\n\n batch_size, n_sents = top_vecs.size(0), top_vecs.size(1) # top_vecs: sentence vectors output after BERT\n pos_emb = self.pos_emb.pe[:, :n_sents]\n # apply CLS masks [batch_size, n_sents, dim]\n x = top_vecs * mask[:, :, None].float() # mask.size() = [batch_size, n_sent], only at the beginning of the sentence\n x = x + pos_emb # add positional embedding\n\n for i in range(self.num_inter_layers):\n x = self.transformer_inter[i](i, x, x, ~mask) # all_sents * max_tokens * dim\n\n x = self.layer_norm(x)\n sent_scores = self.sigmoid(self.wo(x))\n # squeeze embedding dim to retain batch and sentence dimensions only\n sent_scores = sent_scores.squeeze(-1) * mask.float()\n\n return sent_scores\n\n" ]
[ [ "torch.nn.Dropout", "torch.zeros", "torch.nn.LayerNorm", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.arange" ] ]
jluttine/bayespy
[ "d31d9c395c2cd6c276671dd9f13fd951fd19065a" ]
[ "bayespy/inference/vmp/nodes/expfamily.py" ]
[ "################################################################################\n# Copyright (C) 2013-2014 Jaakko Luttinen\n#\n# This file is licensed under the MIT License.\n################################################################################\n\n\nimport warnings\n\nimport numpy as np\n\nfrom bayespy.utils import misc\n\nfrom .node import ensureparents\nfrom .stochastic import Stochastic, Distribution\n\nclass ExponentialFamilyDistribution(Distribution):\n \"\"\"\n Sub-classes implement distribution specific computations.\n \"\"\"\n\n #\n # The following methods are for ExponentialFamily distributions\n #\n\n def compute_message_to_parent(self, parent, index, u_self, *u_parents):\n raise NotImplementedError()\n\n def compute_phi_from_parents(self, *u_parents, mask=True):\n raise NotImplementedError()\n\n def compute_moments_and_cgf(self, phi, mask=True):\n raise NotImplementedError()\n\n #\n # The following methods are for Mixture class\n #\n\n def compute_cgf_from_parents(self, *u_parents):\n raise NotImplementedError()\n \n def compute_fixed_moments_and_f(self, x, mask=True):\n raise NotImplementedError()\n\n def compute_logpdf(self, u, phi, g, f, ndims):\n \"\"\" Compute E[log p(X)] given E[u], E[phi], E[g] and\n E[f]. Does not sum over plates.\"\"\"\n\n # TODO/FIXME: Should I take into account what is latent or\n # observed, or what is even totally ignored (by the mask).\n L = g + f\n for (phi_i, u_i, ndims_i) in zip(phi, u, ndims):\n # Axes to sum (dimensions of the variable, not the plates)\n axis_sum = tuple(range(-ndims_i,0))\n # Compute the term\n # TODO/FIXME: Use einsum!\n L = L + np.sum(\n np.where(u_i != 0, phi_i, 0) * u_i,\n axis=axis_sum\n )\n return L\n\n\n def compute_gradient(self, g, u, phi):\n r\"\"\"\n Compute the standard gradient with respect to the natural parameters.\n \"\"\"\n\n raise NotImplementedError(\"Standard gradient not yet implemented for %s\"\n % (self.__class__.__name__))\n\n\n\ndef useconstructor(__init__):\n def constructor_decorator(self, *args, **kwargs):\n if (self.dims is None or\n self._distribution is None or\n self._moments is None or \n self._parent_moments is None):\n\n (args, kwargs, dims, plates, dist, stats, pstats) = \\\n self._constructor(*args, **kwargs)\n \n self.dims = dims\n self._distribution = dist\n self._moments = stats\n self._parent_moments = pstats\n self.plates = plates\n\n __init__(self, *args, **kwargs)\n\n return constructor_decorator\n\nclass ExponentialFamily(Stochastic):\n \"\"\"\n A base class for nodes using natural parameterization `phi`.\n\n phi\n\n Sub-classes must implement the following static methods:\n _compute_message_to_parent(index, u_self, *u_parents)\n _compute_phi_from_parents(*u_parents, mask)\n _compute_moments_and_cgf(phi, mask)\n _compute_fixed_moments_and_f(x, mask=True)\n\n Sub-classes may need to re-implement:\n 1. 
If they manipulate plates:\n _compute_weights_to_parent(index, weights)\n _compute_plates_to_parent(self, index, plates)\n _compute_plates_from_parent(self, index, plates)\n \n \"\"\"\n\n # Sub-classes should overwrite this (possibly using _constructor)\n dims = None\n \n # Sub-classes should overwrite this\n _distribution = None\n\n @useconstructor\n def __init__(self, *parents, initialize=True, **kwargs):\n\n self.annealing = 1.0\n\n # Terms for the lower bound (G for latent and F for observed)\n self.g = np.array(np.nan)\n self.f = np.array(np.nan)\n\n super().__init__(*parents,\n initialize=initialize,\n dims=self.dims,\n **kwargs)\n\n if not initialize:\n axes = len(self.plates)*(1,)\n self.phi = [misc.nans(axes+dim) for dim in self.dims]\n\n\n @classmethod\n @ensureparents\n def _constructor(cls, *parents, **kwargs):\n \"\"\"\n Constructs distribution and moments objects.\n\n If __init__ uses useconstructor decorator, this method is called to\n construct distribution and moments objects.\n\n The method is given the same inputs as __init__. For some nodes, some of\n these can't be \"static\" class attributes, then the node class must\n overwrite this method to construct the objects manually.\n\n The point of distribution class is to move general distribution but\n not-node specific code. The point of moments class is to define the\n messaging protocols.\n \"\"\"\n parent_plates = [cls._distribution.plates_from_parent(ind, parent.plates)\n for (ind, parent) in enumerate(parents)]\n return (parents,\n kwargs,\n cls.dims,\n cls._total_plates(kwargs.get('plates'), *parent_plates),\n cls._distribution, \n cls._moments, \n cls._parent_moments)\n\n def _initialize_from_parent_moments(self, *u_parents):\n if not np.all(self.observed):\n # Update natural parameters using parents\n self._update_phi_from_parents(*u_parents)\n\n # Update moments\n mask = np.logical_not(self.observed)\n (u, g) = self._distribution.compute_moments_and_cgf(self.phi,\n mask=mask)\n # TODO/FIXME/BUG: You should use observation mask in order to not\n # overwrite them!\n self._set_moments_and_cgf(u, g, mask=mask)\n \n\n def initialize_from_prior(self):\n u_parents = self._message_from_parents()\n self._initialize_from_parent_moments(*u_parents)\n\n\n def initialize_from_parameters(self, *args):\n u_parents = [p_mom.compute_fixed_moments(x) \n for (p_mom, x) in zip(self._parent_moments, args)]\n self._initialize_from_parent_moments(*u_parents)\n \n\n def initialize_from_value(self, x, *args):\n # Update moments from value\n mask = np.logical_not(self.observed)\n u = self._moments.compute_fixed_moments(x, *args)\n # Check that the shape is correct\n for i in range(len(u)):\n ndim = len(self.dims[i])\n if ndim > 0:\n if np.shape(u[i])[-ndim:] != self.dims[i]:\n raise ValueError(\"The initial value for node %s has invalid shape %s.\"\n % (np.shape(x)))\n self._set_moments_and_cgf(u, np.inf, mask=mask)\n\n def initialize_from_random(self):\n \"\"\"\n Set the variable to a random sample from the current distribution.\n \"\"\"\n #self.initialize_from_prior()\n X = self.random()\n self.initialize_from_value(X)\n\n\n def _update_phi_from_parents(self, *u_parents):\n\n # TODO/FIXME: Could this be combined to the function\n # _update_distribution_and_lowerbound ?\n # No, because some initialization methods may want to use this.\n\n # This makes correct broadcasting\n self.phi = self._distribution.compute_phi_from_parents(*u_parents)\n #self.phi = self._compute_phi_from_parents(*u_parents)\n self.phi = list(self.phi)\n # Make sure 
phi has the correct number of axes. It makes life\n # a bit easier elsewhere.\n for i in range(len(self.phi)):\n axes = len(self.plates) + self.ndims[i] - np.ndim(self.phi[i])\n if axes > 0:\n # Add axes\n self.phi[i] = misc.add_leading_axes(self.phi[i], axes)\n elif axes < 0:\n # Remove extra leading axes\n first = -(len(self.plates)+self.ndims[i])\n sh = np.shape(self.phi[i])[first:]\n self.phi[i] = np.reshape(self.phi[i], sh)\n # Check that the shape is correct\n if not misc.is_shape_subset(np.shape(self.phi[i]),\n self.get_shape(i)):\n raise ValueError(\"Incorrect shape of phi[%d] in node class %s. \"\n \"Shape is %s but it should be broadcastable \"\n \"to shape %s.\"\n % (i,\n self.__class__.__name__,\n np.shape(self.phi[i]),\n self.get_shape(i)))\n\n def _set_moments_and_cgf(self, u, g, mask=True):\n self._set_moments(u, mask=mask)\n\n self.g = np.where(mask, g, self.g)\n\n return\n\n\n def get_riemannian_gradient(self):\n r\"\"\"\n Computes the Riemannian/natural gradient.\n \"\"\"\n u_parents = self._message_from_parents()\n m_children = self._message_from_children()\n \n # TODO/FIXME: Put observed plates to zero?\n # Compute the gradient\n phi = self._distribution.compute_phi_from_parents(*u_parents)\n for i in range(len(self.phi)):\n phi[i] = self.annealing * (phi[i] + m_children[i]) - self.phi[i]\n phi[i] = phi[i] * np.ones(self.get_shape(i))\n\n return phi\n\n\n def get_gradient(self, rg):\n r\"\"\" Computes gradient with respect to the natural parameters.\n\n The function takes the Riemannian gradient as an input. This is for\n three reasons: 1) You probably want to use the Riemannian gradient\n anyway so this helps avoiding accidental use of this function. 2) The\n gradient is computed by using the Riemannian gradient and chain rules.\n 3) Probably you need both Riemannian and normal gradients anyway so you\n can provide it to this function to avoid re-computing it.\"\"\"\n \n g = self._distribution.compute_gradient(rg, self.u, self.phi)\n for i in range(len(g)):\n g[i] /= self.annealing\n return g\n\n\n ## def update_parameters(self, d, scale=1.0):\n ## r\"\"\"\n ## Update the parameters of the VB distribution given a change.\n\n ## The parameters should be such that they can be used for\n ## optimization, that is, use log transformation for positive\n ## parameters.\n ## \"\"\"\n ## phi = self.get_parameters()\n ## for i in range(len(phi)):\n ## phi[i] = phi[i] + scale*d[i]\n ## self.set_parameters(phi)\n ## return\n\n\n def get_parameters(self):\n r\"\"\"\n Return parameters of the VB distribution.\n\n The parameters should be such that they can be used for\n optimization, that is, use log transformation for positive\n parameters.\n \"\"\"\n return [np.copy(p) for p in self.phi]\n \n\n\n def _decode_parameters(self, x):\n return [np.copy(p) for p in x]\n\n\n def set_parameters(self, x):\n r\"\"\"\n Set the parameters of the VB distribution.\n\n The parameters should be such that they can be used for\n optimization, that is, use log transformation for positive\n parameters.\n \"\"\"\n self.phi = self._decode_parameters(x)\n self._update_moments_and_cgf()\n return\n\n\n def _update_distribution_and_lowerbound(self, m_children, *u_parents):\n\n # Update phi first from parents..\n self._update_phi_from_parents(*u_parents)\n # .. 
then just add children's message\n self.phi = [self.annealing * (phi + m)\n for (phi, m) in zip(self.phi, m_children)]\n\n # Update u and g\n self._update_moments_and_cgf()\n\n\n def _update_moments_and_cgf(self):\n \"\"\"\n Update moments and cgf based on current phi.\n \"\"\"\n # Mask for plates to update (i.e., unobserved plates)\n update_mask = np.logical_not(self.observed)\n\n # Compute the moments (u) and CGF (g)...\n (u, g) = self._distribution.compute_moments_and_cgf(self.phi,\n mask=update_mask)\n # ... and store them\n self._set_moments_and_cgf(u, g, mask=update_mask)\n\n\n def observe(self, x, *args, mask=True):\n \"\"\"\n Fix moments, compute f and propagate mask.\n \"\"\"\n\n # Compute fixed moments\n (u, f) = self._distribution.compute_fixed_moments_and_f(x, *args,\n mask=mask)\n\n # # Check the dimensionality of the observations\n # self._check_shape()\n # for (i,v) in enumerate(u):\n # # This is what the dimensionality \"should\" be\n # s = self.plates + self.dims[i]\n # t = np.shape(v)\n # if s != t:\n # msg = \"Dimensionality of the observations incorrect.\"\n # msg += \"\\nShape of input: \" + str(t)\n # msg += \"\\nExpected shape: \" + str(s)\n # msg += \"\\nCheck plates.\"\n # raise Exception(msg)\n\n # Set the moments. Shape checking is done there.\n self._set_moments(u, mask=mask, broadcast=False)\n\n self.f = np.where(mask, f, self.f)\n\n # Observed nodes should not be ignored\n self.observed = mask\n self._update_mask()\n\n def lower_bound_contribution(self, gradient=False, ignore_masked=True):\n r\"\"\"Compute E[ log p(X|parents) - log q(X) ]\n\n If deterministic annealing is used, the term E[ -log q(X) ] is\n divided by the anneling coefficient. That is, phi and cgf of q\n are multiplied by the temperature (inverse annealing\n coefficient).\n \n \"\"\"\n\n # Annealing temperature\n T = 1 / self.annealing\n \n # Messages from parents\n u_parents = self._message_from_parents()\n phi = self._distribution.compute_phi_from_parents(*u_parents)\n # G from parents\n L = self._distribution.compute_cgf_from_parents(*u_parents)\n\n # G for unobserved variables (ignored variables are handled properly\n # automatically)\n latent_mask = np.logical_not(self.observed)\n\n # G and F\n if np.all(self.observed):\n z = np.nan\n elif T == 1:\n z = -self.g\n else:\n z = -T * self.g\n ## TRIED THIS BUT IT WAS WRONG:\n ## z = -T * self.g + (1-T) * self.f\n ## if np.any(np.isnan(self.f)):\n ## warnings.warn(\"F(x) not implemented for node %s. 
This \"\n ## \"is required for annealed lower bound \"\n ## \"computation.\" % self.__class__.__name__)\n ##\n ## It was wrong because the optimal q distribution has f which is\n ## weighted by 1/T and here the f of q is weighted by T so the\n ## total weight is 1, thus it cancels out with f of p.\n\n L = L + np.where(self.observed, self.f, z)\n\n for (phi_p, phi_q, u_q, dims) in zip(phi, self.phi, self.u, self.dims):\n # Form a mask which puts observed variables to zero and\n # broadcasts properly\n latent_mask_i = misc.add_trailing_axes(\n misc.add_leading_axes(\n latent_mask,\n len(self.plates) - np.ndim(latent_mask)),\n len(dims))\n axis_sum = tuple(range(-len(dims),0))\n\n # Compute the term\n phi_q = np.where(latent_mask_i, phi_q, 0)\n # Apply annealing\n phi_diff = phi_p - T * phi_q\n # Handle 0 * -inf\n phi_diff = np.where(u_q != 0, phi_diff, 0)\n # TODO/FIXME: Use einsum here?\n Z = np.sum(phi_diff * u_q, axis=axis_sum)\n\n L = L + Z\n\n if ignore_masked:\n return (np.sum(np.where(self.mask, L, 0))\n * self.broadcasting_multiplier(self.plates,\n np.shape(L),\n np.shape(self.mask))\n * np.prod(self.plates_multiplier))\n else:\n return (np.sum(L)\n * self.broadcasting_multiplier(self.plates,\n np.shape(L))\n * np.prod(self.plates_multiplier))\n\n\n def logpdf(self, X, mask=True):\n \"\"\"\n Compute the log probability density function Q(X) of this node.\n \"\"\"\n if mask is not True:\n raise NotImplementedError('Mask not yet implemented')\n (u, f) = self._distribution.compute_fixed_moments_and_f(X, mask=mask)\n Z = 0\n for (phi_d, u_d, dims) in zip(self.phi, u, self.dims):\n axis_sum = tuple(range(-len(dims),0))\n # TODO/FIXME: Use einsum here?\n Z = Z + np.sum(phi_d * u_d, axis=axis_sum)\n #Z = Z + misc.sum_multiply(phi_d, u_d, axis=axis_sum)\n\n return (self.g + f + Z)\n \n\n def pdf(self, X, mask=True):\n \"\"\"\n Compute the probability density function of this node.\n \"\"\"\n return np.exp(self.logpdf(X, mask=mask))\n \n\n def _save(self, group):\n \"\"\"\n Save the state of the node into a HDF5 file.\n\n group can be the root\n \"\"\"\n ## if name is None:\n ## name = self.name\n ## subgroup = group.create_group(name)\n \n for i in range(len(self.phi)):\n misc.write_to_hdf5(group, self.phi[i], 'phi%d' % i)\n misc.write_to_hdf5(group, self.f, 'f')\n misc.write_to_hdf5(group, self.g, 'g')\n super()._save(group)\n\n \n def _load(self, group):\n \"\"\"\n Load the state of the node from a HDF5 file.\n \"\"\"\n # TODO/FIXME: Check that the shapes are correct!\n for i in range(len(self.phi)):\n phii = group['phi%d' % i][...]\n self.phi[i] = phii\n \n self.f = group['f'][...]\n self.g = group['g'][...]\n super()._load(group)\n\n\n def random(self):\n \"\"\"\n Draw a random sample from the distribution.\n \"\"\"\n return self._distribution.random(*(self.phi), plates=self.plates)\n" ]
[ [ "numpy.logical_not", "numpy.reshape", "numpy.all", "numpy.ndim", "numpy.copy", "numpy.shape", "numpy.prod", "numpy.array", "numpy.where", "numpy.sum" ] ]
ullrichd21/CantmeraOS
[ "d17a6e918132e7f19045e710d1b278779fd22c42" ]
[ "Cantmera/generate_grayscale.py" ]
[ "from matplotlib import pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy\n\n#image = \"ct34\"\nimage_ext = \".jpg\"\nimage_list = [\"ct1\",\"ct3\",\"ct4\",\"ct5\",\"ct6\",\"ct7\",\"ct8\",\"ct9\",\"ct10\",\"ct11\",\"ct12\",\"ct13\",\n\t\t\t\"ct14\",\"ct15\",\"ct16\",\"ct17\",\"ct18\",\"ct19\",\"ct20\",\"ct21\",\"ct22\",\"ct23\",\"ct24\",\"ct25\",\"ct26\",\n\t\t\t\"ct27\",\"ct28\",\"ct29\",\"ct30\",\"ct31\",\"ct32\",\"ct33\",\"ct34\", \"ct35\"]\nmethods = [\"BT601\", \"BT709\"]\nmethod = \"BT601\"\n\nfor current_image in image_list:\n\t# img = mpimg.imread(image + image_ext) #Get the image\n\timg = mpimg.imread(current_image + image_ext)\n\tfor method in methods:\n\t\tR, G, B = img[:,:,0], img[:,:,1], img[:,:,2] #Split RGB channels\n\t\t\n\t\tif method == \"BT601\":\n\t\t\timgGray = 0.2989 * R + 0.5870 * G + 0.1140 * B #Convert all channels to grayscale.\n\t\telif method == \"BT709\":\n\t\t\timgGray = 0.2126 * R + 0.7152 * G + 0.0722 * B\n\t\telif method == \"Decomposition_MAX\":\n\t\t\timgGray = numpy.copy(img)\n\t\t\tfor ix in range(len(img)):\n\t\t\t\tfor iy in range(len(img[ix])):\n\t\t\t\t\tval = max(img[ix, iy, 0], img[ix, iy, 1], img[ix, iy, 2]) #Determine max value of channels.\n\t\t\t\t\timgGray[ix, iy, 0] = val #Set all channels to the same value.\n\t\t\t\t\timgGray[ix, iy, 1] = val\n\t\t\t\t\timgGray[ix, iy, 2] = val\n\t\t\t\n\t\telif method == \"Decomposition_MIN\":\n\t\t\timgGray = numpy.copy(img)\n\t\t\tfor ix in range(len(img)):\n\t\t\t\tfor iy in range(len(img[ix])):\n\t\t\t\t\tval = min(img[ix, iy, 0], img[ix, iy, 1], img[ix, iy, 2]) #Determine min value of channels.\n\t\t\t\t\timgGray[ix, iy, 0] = val #Set all channels to the same value.\n\t\t\t\t\timgGray[ix, iy, 1] = val\n\t\t\t\t\timgGray[ix, iy, 2] = val\n\n\t\tplt.title(current_image + \"_\" + method + image_ext)\n\t\tfig = plt.gcf()\n\t\tfig.canvas.set_window_title(current_image + \"_\" + method + image_ext)\n\t\tmpimg.imsave(current_image + \"_\" + method + image_ext, imgGray, cmap='gray')\n\t\t# plt.imshow(imgGray, cmap='gray') #Show the image.\n\t\t# plt.show()\n" ]
[ [ "matplotlib.image.imsave", "matplotlib.pyplot.title", "matplotlib.pyplot.gcf", "matplotlib.image.imread", "numpy.copy" ] ]
evandropp10/predict_energy_star_score_nyc
[ "fed291c6714d8be14abade686efc04a3ef93625a" ]
[ "model02.py" ]
[ "# encoding: utf-8\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\ndef resultToInt(x):\n xint = int(x)\n\n if xint > 100:\n xint = 100\n if xint < 0:\n xint = 0\n\n return xint\n\ndef trainTest(dftt, x, y):\n # dftt - DF Train and Test\n ## Split Dataset into train and test\n pf = PolynomialFeatures(degree=2)\n x_poly = pf.fit_transform(dftt[x])\n \n x_train, x_test, y_train, y_test = train_test_split(x_poly, dftt[y], test_size=0.20, random_state=101)\n \n model = LinearRegression()\n \n model.fit(x_train, y_train)\n \n prediction = model.predict(x_test)\n \n \n result = pd.DataFrame(columns=['Test', 'Prediction'])\n result['Test'] = y_test\n result['Prediction'] = prediction\n result['Prediction'] = result['Prediction'].apply(resultToInt)\n return result\n \n \ndef trainPredict(dft, dfp, x, y):\n # dft - DF Train,\n # dfp - DF Prediction\n pf = PolynomialFeatures(degree=2)\n x_poly_train = pf.fit_transform(dft[x])\n x_poly_test = pf.fit_transform(dfp[x])\n\n model = LinearRegression()\n \n model.fit(x_poly_train, dft[y])\n \n prediction = model.predict(x_poly_test)\n\n result = pd.DataFrame(columns=['Property Id', 'score'])\n result['Property Id'] = dfp['Property Id']\n result['score'] = prediction\n result['score'] = result['score'].apply(resultToInt)\n \n return result\n\n" ]
[ [ "sklearn.linear_model.LinearRegression", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.PolynomialFeatures", "pandas.DataFrame" ] ]
PhantomJoker07/BastardSword
[ "62b9832136c05516c14c7acd65c2edcba3cfb0d0" ]
[ "imageviewer.py" ]
[ "import matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nimport struct\n\ndef draw_image(fileName):\n with open(fileName, 'rb') as file:\n width, height = struct.unpack('ii', file.read(4*2))\n image_data_bytes = file.read((width*height*4) * 4)\n image_data_float = struct.unpack('f'*(width*height*4), image_data_bytes)\n npimage = np.array(image_data_float).reshape((height, width, 4))[:,:,0:3]\n plt.imshow(npimage)\n plt.show() \n\nif __name__ == \"__main__\":\n fileName = sys.argv[1]\n draw_image(fileName)" ]
[ [ "numpy.array", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
dcxSt/mcgill_physics_hackathon_2019
[ "925b2322dae988d0b14d444a61d7ece4116c9b98" ]
[ "main.py" ]
[ "from aiohttp import web\nimport socketio\nimport numpy as np\n\ndef load_eigenvector(k,d):\n vec_path = \"eigenvectors/eigen_k=\" + str(k) + \",d=\" + str(d) + \".npy\"\n eigenvector_np = np.load(vec_path)\n eigenvector_str = \"\"\n for x in np.nditer(eigenvector_np):\n eigenvector_str += str(x) + \" \"\n\n # print()\n # print(eigenvector_str)\n return eigenvector_str\n\n# creates a new Async Socket IO Server\nsio = socketio.AsyncServer(cors_allowed_origins=\"*\")\n# Creates a new Aiohttp Web Application\napp = web.Application()\n# Binds our Socket.IO server to our Web App\n# instance\nsio.attach(app)\n\n# we can define aiohttp endpoints just as we normally\n# would with no change\nasync def index(request):\n with open('index.html') as f:\n return web.Response(text=f.read(), content_type='text/html')\n\nasync def test(request):\n with open('test.js') as f:\n return web.Response(text=f.read(), content_type='text/js')\n\n# If we wanted to create a new websocket endpoint,\n# use this decorator, passing in the name of the\n# event we wish to listen out for\[email protected]('hi')\nasync def print_message(sid, message, d_JS):\n k = message\n d = d_JS\n # print(k)\n # print(d)\n messageToJS = load_eigenvector(k,d)\n # print()\n # print(messageToJS)\n\n # print()\n # print(messageToJS)\n # When we receive a new event of type\n # 'message' through a socket.io connection\n # we print the socket ID and the message\n # print(\"Socket ID: \" , sid)\n # print(message) #message is the value sent from the HTML\n await sio.emit('message', messageToJS) \n # notice it has to be of type 'message' and then pass the \n # value to send to html doc \n# @sio.on('d')\n# async def get_d_val(sid, message):\n # d = message\n# We bind our aiohttp endpoint to our app\n# router\napp.router.add_get('/', index)\napp.router.add_get('/test.js', test)\n\n\n# We kick off our server\nif __name__ == '__main__':\n web.run_app(app)\n\n\n" ]
[ [ "numpy.load", "numpy.nditer" ] ]
debnoob/em_examples
[ "c137108b51118855296d02ac8c31ed299bb23e34" ]
[ "em_examples/MT.py" ]
[ "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom scipy.constants import epsilon_0, mu_0\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ipywidgets import *\nimport warnings\nwarnings.filterwarnings('ignore') # ignore warnings: only use this once you are sure things are working\nfrom .Base import widgetify\n\n\n\"\"\"\nMT1D: n layered earth problem\n*****************************\n\nAuthor: Thibaut Astic\nContact: [email protected]\nDate: January 2016\n\nThis code compute the analytic response of a n-layered Earth to a plane wave (Magneto-Tellurics).\n\nWe start by looking at Maxwell's equations in the electric\nfield \\\\\\(\\\\\\mathbf{E}\\\\) and the magnetic flux\n\\\\\\(\\\\\\mathbf{H}\\\\) to write the wave equations\n\\\\(\\\\ \\nabla ^2 \\mathbf{E_x} + k^2 \\mathbf{E_x} = 0 \\\\) &\n\\\\(\\\\ \\nabla ^2 \\mathbf{H_y} + k^2 \\mathbf{H_y} = 0 \\\\)\n\nThen solving the equations in each layer \"j\" between z_{j-1} and z_j in the form of\n\\\\(\\\\ E_{x, j} (z) = U_j e^{i k (z-z_{j-1})} + D_j e^{-i k (z-z_{j-1})} \\\\)\n\\\\(\\\\ H_{y, j} (z) = \\frac{1}{Z_j} (D_j e^{-i k (z-z_{j-1})} - U_j e^{i k (z-z_{j-1})}) \\\\)\n\nWith U and D the Up and Down components of the E-field.\n\nThe iteration from one layer to another is ensure by:\n\n\\\\(\\\\ \\left(\\begin{matrix} E_{x, j} \\\\ H_{y, j} \\end{matrix} \\right) =\n P_j T_j P^{-1}_J \\left(\\begin{matrix} E_{x, j+1} \\\\ H_{y, j+1} \\end{matrix} \\right) \\\\)\n\nAnd the Boundary Condition is set for the E-field in the last layer, with no Up component (=0)\nand only a down component (=1 then normalized by the highest amplitude to ensure numeric stability)\n\nThe layer 0 is assumed to be the air layer.\n\n\"\"\"\n\n# Frequency conversion\nomega = lambda f: 2.*np.pi*f\n\n# Evaluate k wavenumber\nk = lambda mu, sig, eps, f: np.sqrt(mu*mu_0*eps*epsilon_0*(2.*np.pi*f)**2.-1.j*mu*mu_0*sig*omega(f))\n\n# Define a frquency range for a survey\nfrange = lambda minfreq, maxfreq, step: np.logspace(minfreq, maxfreq, num = int(step), base = 10.)\n\n# Functions to create random physical Perties for a n-layered earth\nthick = lambda minthick, maxthick, nlayer: np.append(np.array([1.2*10.**5]),\n np.ndarray.round(minthick + (maxthick-minthick)* np.random.rand(int(nlayer)-1, 1)\n , decimals =1))\n\nsig = lambda minsig, maxsig, nlayer: np.append(np.array([0.]),\n np.ndarray.round(10.**minsig + (10.**maxsig-10.**minsig)* np.random.rand(int(nlayer), 1)\n , decimals=3))\n\nmu = lambda minmu, maxmu, nlayer: np.append(np.array([1.]),\n np.ndarray.round(minmu + (maxmu-minmu)* np.random.rand(int(nlayer), 1)\n , decimals=1))\n\neps = lambda mineps, maxeps, nlayer: np.append(np.array([1.]),\n np.ndarray.round(mineps + (maxeps-mineps)* np.random.rand(int(nlayer), 1)\n , decimals=1))\n\n# Evaluate Impedance Z of a layer\nImpZ = lambda f, mu, k: omega(f)*mu*mu_0/k\n\n# Complex Cole-Cole Conductivity - EM utils\nPCC= lambda siginf, m, t, c, f: siginf*(1.-(m/(1.+(1j*omega(f)*t)**c)))\n\n\n# Converted thickness array into top of layer array\ndef top(thick):\n topv= np.zeros(len(thick)+1)\n\n topv[0]=-thick[0]\n\n for i in range(1, len(topv), 1):\n topv[i] = topv[i-1] + thick[i-1]\n\n return topv\n\n# Propagation Matrix and theirs inverses\n\n# matrix T for transition of Up and Down components accross a layer\nT = lambda h, k: np.matrix([[np.exp(1j*k*h), 0.], [0., np.exp(-1j*k*h)]], dtype='complex_')\n\nTinv = lambda h, k: np.matrix([[np.exp(-1j*k*h), 0.], [0., np.exp(1j*k*h)]], 
dtype='complex_')\n\n# transition of Up and Down components accross a layer\nUD_Z = lambda UD, z, zj, k : T((z-zj), k)*UD\n\n\n# matrix P relating Up and Down components with E and H fields\nP = lambda z: np.matrix([[1., 1, ], [-1./z, 1./z]], dtype='complex_')\n\nPinv = lambda z: np.matrix([[1., -z], [1., z]], dtype='complex_')/2.\n\n\n# Time Variation of E and H\nE_ZT = lambda U, D, f, t : np.exp(1j*omega(f)*t)*(U+D)\nH_ZT = lambda U, D, Z, f, t : (1./Z)*np.exp(1j*omega(f)*t)*(D-U)\n\n# Plot the configuration of the problem\ndef PlotConfiguration(thick, sig, eps, mu, ax, widthg, z):\n\n topn = top(thick)\n widthn = np.arange(-widthg, widthg+widthg/10., widthg/10.)\n\n ax.set_ylim([z.min(), z.max()])\n ax.set_xlim([-widthg, widthg])\n\n ax.set_ylabel(\"Depth (m)\", fontsize=16.)\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n\n # define filling for the different layers\n hatches=['/' , '+', 'x', '|' , '\\\\', '-' , 'o' , 'O' , '.' , '*' ]\n\n # Write the physical properties of air\n ax.annotate((\"Air, $\\\\rho$ =%g ohm-m\")%(np.inf),\n xy=(-widthg/2., -np.abs(z.max())/2.), xycoords='data',\n xytext=(-widthg/2., -np.abs(z.max())/2.), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\epsilon_r$= %g\")%(eps[0]),\n xy=(-widthg/2., -np.abs(z.max())/3.), xycoords='data',\n xytext=(-widthg/2., -np.abs(z.max())/3.), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\mu_r$= %g\")%(mu[0]),\n xy=(-widthg/2., -np.abs(z.max())/3.), xycoords='data',\n xytext=(0, -np.abs(z.max())/3.), textcoords='data',\n fontsize=14.)\n\n # Write the physical properties of the differents layers up to the (n-1)-th and fill it with pattern\n for i in range(1, len(topn)-1, 1):\n if topn[i] == topn[i+1]:\n pass\n else:\n ax.annotate((\"$\\\\rho$ =%g ohm-m\")%(1./sig[i]),\n xy=(0., (2.*topn[i]+topn[i+1])/3), xycoords='data',\n xytext=(0., (2.*topn[i]+topn[i+1])/3), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\epsilon_r$= %g\")%(eps[i]),\n xy=(-widthg/1.1, (2.*topn[i]+topn[i+1])/3), xycoords='data',\n xytext=(-widthg/1.1, (2.*topn[i]+topn[i+1])/3), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\mu_r$= %g\")%(mu[i]),\n xy=(-widthg/2., (2.*topn[i]+topn[i+1])/3), xycoords='data',\n xytext=(-widthg/2., (2.*topn[i]+topn[i+1])/3), textcoords='data',\n fontsize=14.)\n\n ax.plot(widthn, topn[i]*np.ones_like(widthn), color='black')\n ax.fill_between(widthn, topn[i], topn[i+1], alpha=0.3, color=\"none\", edgecolor='black', hatch=hatches[(i-1)%10])\n\n # Write the physical properties of the n-th layer and fill it with pattern\n ax.plot(widthn, topn[-1]*np.ones_like(widthn), color='black')\n ax.fill_between(widthn, topn[-1], z.max(), alpha=0.3, color=\"none\", edgecolor='black', hatch=hatches[(len(topn)-2)%10])\n\n ax.annotate((\"$\\\\rho$ =%g ohm-m\")%(1./sig[-1]),\n xy=(0., (2.*topn[-1]+z.max())/3), xycoords='data',\n xytext=(0., (2.*topn[-1]+z.max())/3), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\epsilon_r$= %g\")%(eps[-1]),\n xy=(-widthg/1.1, (2.*topn[-1]+z.max())/3), xycoords='data',\n xytext=(-widthg/1.1, (2.*topn[-1]+z.max())/3), textcoords='data',\n fontsize=14.)\n\n ax.annotate((\"$\\mu_r$= %g\")%(mu[-1]),\n xy=(-widthg/2., (2.*topn[-1]+z.max())/3), xycoords='data',\n xytext=(-widthg/2., (2.*topn[-1]+z.max())/3), textcoords='data',\n fontsize=14.)\n\n # plot Trees!\n ax.annotate(\"\",\n xy=(widthg/2., -1.*z.max()/10.), xycoords='data',\n xytext=(widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.5, head_length=0.5', color='green', 
linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(widthg/2., -3./4.*z.max()/10.), xycoords='data',\n xytext=(widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.6, head_length=0.6', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(widthg/2., -1./2.*z.max()/10.), xycoords='data',\n xytext=(widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.7, head_length=0.7', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.2*widthg/2., -1.*z.max()/10.), xycoords='data',\n xytext=(1.2*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.5, head_length=0.5', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.2*widthg/2., -3./4.*z.max()/10.), xycoords='data',\n xytext=(1.2*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.6, head_length=0.6', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.2*widthg/2., -1./2.*z.max()/10.), xycoords='data',\n xytext=(1.2*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.7, head_length=0.7', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.5*widthg/2., -1.*z.max()/10.), xycoords='data',\n xytext=(1.5*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.5, head_length=0.5', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.5*widthg/2., -3./4.*z.max()/10.), xycoords='data',\n xytext=(1.5*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.6, head_length=0.6', color='green', linewidth=2.)\n )\n\n ax.annotate(\"\",\n xy=(1.5*widthg/2., -1./2.*z.max()/10.), xycoords='data',\n xytext=(1.5*widthg/2., 0.), textcoords='data',\n arrowprops=dict(arrowstyle='->, head_width=0.7, head_length=0.7', color='green', linewidth=2.)\n )\n\n\n ax.invert_yaxis()\n\n return ax\n\n# Propagate Up and Down component for a certain frequency & evaluate E and H field\n\ndef Propagate(f, H, sig, chg, taux, c, mu, eps, n):\n\n sigcm = np.zeros_like(sig, dtype='complex_')\n\n for j in range(1, len(sig)):\n sigcm[j]=PCC(sig[j], chg[j], taux[j], c[j], f)\n\n K = k(mu, sigcm, eps, f)\n Z = ImpZ(f, mu, K)\n\n EH = np.matrix(np.zeros((2, n+1), dtype = 'complex_'), dtype = 'complex_')\n UD = np.matrix(np.zeros((2, n+1), dtype = 'complex_'), dtype = 'complex_')\n\n UD[1, -1] = 1.\n\n for i in range(-2, -(n+2), -1):\n\n UD[:, i] = Tinv(H[i+1], K[i])*Pinv(Z[i])*P(Z[i+1])*UD[:, i+1]\n UD = UD/((np.abs(UD[0, :]+UD[1, :])).max())\n\n for j in range(0, n+1):\n EH[:, j] = np.matrix([[1., 1, ], [-1./Z[j], 1./Z[j]]])*UD[:, j]\n\n return UD, EH, Z , K\n\n\n# Evaluate the apparent resistivity and phase for a frequency range\ndef appres(F, H, sig, chg, taux, c, mu, eps, n):\n\n Res = np.zeros_like(F)\n Phase = np.zeros_like(F)\n App_ImpZ= np.zeros_like(F, dtype='complex_')\n\n for i in range(0, len(F)):\n\n UD, EH, Z , K = Propagate(F[i], H, sig, chg, taux, c, mu, eps, n)\n\n App_ImpZ[i] = EH[0, 1]/EH[1, 1]\n\n Res[i] = np.abs(App_ImpZ[i])**2./(mu_0*omega(F[i]))\n Phase[i] = np.angle(App_ImpZ[i], deg = True)\n\n return Res, Phase\n\n# Evaluate Up, Down components, E and H field, for a frequency range,\n# a discretized depth range and a time range (use to calculate envelope)\ndef calculateEHzt(F, H, sig, chg, taux, c, mu, eps, n, zsample, tsample):\n\n topc = top(H)\n\n layer = np.zeros(len(zsample), dtype=np.int)-1\n\n Exzt = np.matrix(np.zeros((len(zsample), len(tsample)), dtype = 'complex_'), dtype = 'complex_')\n Hyzt = 
np.matrix(np.zeros((len(zsample), len(tsample)), dtype = 'complex_'), dtype = 'complex_')\n Uz = np.matrix(np.zeros((len(zsample), len(tsample)), dtype = 'complex_'), dtype = 'complex_')\n Dz = np.matrix(np.zeros((len(zsample), len(tsample)), dtype = 'complex_'), dtype = 'complex_')\n UDaux = np.matrix(np.zeros((2, len(zsample)), dtype = 'complex_'), dtype = 'complex_')\n\n for i in range(0, n+1, 1):\n layer = layer+(zsample>=topc[i])*1\n\n for j in range(0, len(F)):\n\n UD, EH, Z , K = Propagate(F[j], H, sig, chg, taux, c, mu, eps, n)\n\n for p in range(0, len(zsample)):\n\n UDaux[:, p] = UD_Z(UD[:, layer[p]], zsample[p], topc[layer[p]], K[layer[p]])\n\n for q in range(0, len(tsample)):\n\n Exzt[p, q] = Exzt[p, q] + E_ZT(UDaux[0, p], UDaux[1, p], F[j], tsample[q])/len(F)\n Hyzt[p, q] = Hyzt[p, q] + H_ZT(UDaux[0, p], UDaux[1, p], Z[layer[p]], F[j], tsample[q])/len(F)\n Uz[p, q] = Uz[p, q] + UDaux[0, p]*np.exp(1j*omega(F[j])*tsample[q])/len(F)\n Dz[p, q] = Dz[p, q] + UDaux[1, p]*np.exp(1j*omega(F[j])*tsample[q])/len(F)\n\n return Exzt, Hyzt, Uz, Dz, UDaux, layer\n\n\n# Function to Plot Apparent Resistivity and Phase\ndef PlotAppRes(F, H, sig, chg, taux, c, mu, eps, n, fenvelope, PlotEnvelope):\n\n Res, Phase = appres(F, H, sig, chg, taux, c, mu, eps, n)\n\n figwdith = 18\n figheight = 12\n fig = plt.figure(figsize=(figwdith, figheight))\n ax0 = plt.subplot2grid((figheight, figwdith), (0, 0), colspan=int(2*figwdith/3-1), rowspan=int(figheight/2-1))\n ax1 = plt.subplot2grid((figheight, figwdith), (int(figheight/2), 0), colspan=int(2*figwdith/3-1), rowspan=int(figheight/2-1))\n ax2 = plt.subplot2grid((figheight, figwdith), (0, int(2*figwdith/3+1)), colspan=int(figwdith/3-1), rowspan=int(figheight))\n ax = [ax0, ax1, ax2]\n\n ax[0].scatter(F, Res, color='black')\n ax[0].set_xscale('Log')\n ax[0].set_yscale('Log')\n ax[0].set_ylim([10.**(np.round(np.log10(Res.min()))-1.), 10.**(np.round(np.log10(Res.max()))+1.)])\n ax[0].set_xlim([F.max(), F.min()])\n ax[0].set_ylabel('Apparent Resistivity (Ohm-m)', fontsize=16., color=\"black\")\n ax[0].set_xlabel('Frequency (Hz)', fontsize=16.)\n ax[0].grid(which='major')\n\n ax[1].set_ylim([0., 90.])\n ax[1].set_xscale('Log')\n ax[1].set_xlim([F.max(), F.min()])\n ax[1].scatter(F, Phase, color='purple')\n ax[1].set_ylabel('Phase (Degrees)', fontsize=16., color=\"purple\")\n ax[1].grid(which='major')\n\n\n zc=np.arange(-(H[1:].max()+10)*n, (H[1:].max()+10)*n, 10.)\n\n ax[0].tick_params(labelsize=16)\n ax[1].tick_params(labelsize=16)\n if PlotEnvelope:\n\n widthn=np.logspace(np.floor(np.log10(Res.min())-1.), np.ceil(np.log10(Res.max())+1.), num=100, endpoint=True, base=10.0)\n fenvelope1n=np.ones(100)*fenvelope\n ax[0].plot(fenvelope1n, widthn, linestyle='dashed', color='black', linewidth =3.)\n ax[1].plot(fenvelope1n, widthn, linestyle='dashed', color='black', linewidth =3.)\n\n tc=np.arange(0., 1./fenvelope, 0.01/(fenvelope))\n Exzt, Hyzt, Uz, Dz, UDaux, layer = calculateEHzt(np.array([fenvelope]), H, sig, chg, taux, c, mu, eps, n, zc, tc)\n\n axH=ax[2].twiny()\n\n ax[2].tick_params(labelsize=16)\n axH.tick_params(labelsize=16)\n\n ax[2].set_xlabel('Amplitude Electric Field E (V/m)', color='blue', fontsize=16)\n\n axH.set_xlabel('Amplitude Magnetic Field H (A/m)', color='red', fontsize=16)\n\n ax[2].fill_betweenx(zc, np.squeeze(np.asarray(np.real(Exzt.min(axis=1)))),\n np.squeeze(np.asarray(np.real(Exzt.max(axis=1)))),\n color='blue', alpha=0.1)\n\n axH.fill_betweenx(zc, np.squeeze(np.asarray(np.real(Hyzt.min(axis=1)))),\n 
np.squeeze(np.asarray(np.real(Hyzt.max(axis=1)))),\n color='red', alpha=0.1)\n\n ax[2] = PlotConfiguration(H, sig, eps, mu, ax[2], (1.5*np.abs(Exzt).max()), zc)\n axH.set_xlim([-1.5*np.abs(Hyzt).max(), 1.5*np.abs(Hyzt).max()])\n axH.set_xlim([-1.5*np.abs(Hyzt).max(), 1.5*np.abs(Hyzt).max()])\n else:\n # print 'No envelop (if True, might be slow)'\n ax[2] = PlotConfiguration(H, sig, eps, mu, ax[2], 1., zc)\n ax[2].get_xaxis().set_ticks([])\n\n #return fig\n plt.show()\n\n# Interactive MT for Notebook\ndef PlotAppRes3Layers_wrapper(fmin, fmax, nbdata, h1, h2, rhol1, rhol2, rhol3, mul1, mul2, mul3, epsl1, epsl2, epsl3, PlotEnvelope, F_Envelope):\n\n frangn=frange(np.log10(fmin), np.log10(fmax), nbdata)\n sig3= np.array([0., 0.001, 0.1, 0.001])\n thick3 = np.array([120000., 50., 50.])\n eps3=np.array([1., 1., 1., 1])\n mu3=np.array([1., 1., 1., 1])\n chg3=np.array([0., 0.1, 0., 0.2])\n chg3_0=np.array([0., 0.1, 0., 0.])\n taux3=np.array([0., 0.1, 0., 0.1])\n c3=np.array([1., 1., 1., 1.])\n\n sig3[1]=1./rhol1\n sig3[2]=1./rhol2\n sig3[3]=1./rhol3\n mu3[1]=mul1\n mu3[2]=mul2\n mu3[3]=mul3\n eps3[1]=epsl1\n eps3[2]=epsl2\n eps3[3]=epsl3\n thick3[1]=h1\n thick3[2]=h2\n\n PlotAppRes(frangn, thick3, sig3, chg3_0, taux3, c3, mu3, eps3, 3, F_Envelope, PlotEnvelope)\n\ndef MT1D_app():\n app = widgetify(\n PlotAppRes3Layers_wrapper,\n fmin = FloatText(min=1e-5, max=1e5, value = 1e-5, continuous_update=False),\n fmax = FloatText(min=1e-5, max=1e5, value = 1e5, continuous_update=False),\n nbdata = IntSlider(min =10, max=100, value = 100, step =10, continuous_update=False),\n h1=FloatSlider(min=0., max=10000., step=50., value=500., continuous_update=False),\n h2=FloatSlider(min=0., max=10000., step=50., value=1000., continuous_update=False),\n rhol1=FloatText(min=1e-8, max=1e8, value = 100., continuous_update=False, description='$\\\\rho_1$'),\n rhol2=FloatText(min=1e-8, max=1e8, value = 1000., continuous_update=False, description='$\\\\rho_2$'),\n rhol3=FloatText(min=1e-8, max=1e8, value = 10., continuous_update=False, description='$\\\\rho_3$'),\n mul1=FloatSlider(min=1., max=1.2, step=0.01, value=1., continuous_update=False, description='$\\\\mu_1$'),\n mul2=FloatSlider(min=1., max=1.2, step=0.01, value=1., continuous_update=False, description='$\\\\mu_2$'),\n mul3=FloatSlider(min=1., max=1.2, step=0.01, value=1., continuous_update=False, description='$\\\\mu_3$'),\n epsl1=FloatSlider(min=1., max=80., step=1., value=1., continuous_update=False, description='$\\\\varepsilon_1$'),\n epsl2=FloatSlider(min=1., max=80., step=1, value=1., continuous_update=False, description='$\\\\varepsilon_2$'),\n epsl3=FloatSlider(min=1., max=80., step=1., value=1., continuous_update=False, description='$\\\\varepsilon_3$'),\n PlotEnvelope=ToggleButton(options =True, description='Plot Envelope fields'),\n F_Envelope=FloatText(min = 1e-5, max=1e5, value=1e4, continuous_update=False, description='F')\n )\n return app\n\ndef run(n, plotIt=True):\n # something to make a plot\n\n F = frange(-5., 5., 20)\n H = thick(50., 100., n)\n sign = sig(-5., 0., n)\n mun = mu(1., 2., n)\n epsn = eps(1., 9., n)\n chg = np.zeros_like(sign)\n taux = np.zeros_like(sign)\n c = np.zeros_like(sign)\n\n Res, Phase = appres(F, H, sign, chg, taux, c, mun, epsn, n)\n\n if plotIt:\n\n PlotAppRes(F, H, sign, chg, taux, c, mun, epsn, n, fenvelope=1000., PlotEnvelope=True)\n\n return Res, Phase\n\nif __name__ == '__main__':\n run(3)\n" ]
[ [ "numpy.matrix", "numpy.ones_like", "numpy.abs", "numpy.arange", "numpy.ones", "numpy.log10", "numpy.zeros_like", "numpy.angle", "numpy.exp", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
jglaser2/ssa
[ "35326b1d9e96501ebda664f9fe4a0225fbbc1168" ]
[ "ssa/util.py" ]
[ "############ Import packages\n\nimport numpy as np\nimport numpy.random as npr\nimport copy\nimport time\n\nimport torch\n\n\n\n############ Utilities\n\ndef get_sample_weights(Y,eps=.1):\n\n \"\"\"\n Function for getting weights for each sample (So different time points can be upweighted/downweighted in the cost function)\n The weights are inversely related to the norm of the activity across all neurons\n\n Parameters\n ----------\n Y: neural data\n numpy 2d array of shape [n_time,n_output_neurons]\n eps: a small offset that limits the maximal sample weight of a time point (in the scenario there is zero activity)\n scalar\n\n Returns\n -------\n The sample weights - an array of shape [n_time,1]\n\n \"\"\"\n\n\n tmp=1/(np.sqrt(np.sum(Y**2,axis=1))+eps)\n tmp2=tmp/np.mean(tmp)\n return tmp2[:,None]\n\n\ndef torchify(array_list):\n\n \"\"\"\n Function that turns a list of arrays into a list of torch tensors.\n\n Parameters\n ----------\n array_list: a list of numpy arrays\n\n Returns\n -------\n a list of torch tensors (corresponding to the original arrays)\n\n \"\"\"\n\n return [torch.tensor(array_entry, dtype=torch.float) for array_entry in array_list]\n" ]
[ [ "numpy.mean", "numpy.sum", "torch.tensor" ] ]
2runo/dl_numpy
[ "3aa498146189fd3e5b4b0c0a97830253edba12af" ]
[ "dl_numpy/core/base.py" ]
[ "\"\"\"\n연산 magic method 정의\n\"\"\"\nimport numpy as np\nfrom .decorators import for_all_methods, _graph_debugging_decorator\nfrom ..autograd.backward import Backward\nfrom ..utils.core import ValueRepr\n\n\ndef check_tensor_decorator(class_method=True):\n # 인자를 Tensor로 변환하는 decorator\n def _check_tensor_decorator(fn):\n def to_tensor(x):\n try:\n if x.is_tensor:\n return x\n except AttributeError:\n pass\n return Tensor(x, var=False)\n\n def inner(*args, **kwargs):\n if class_method:\n args = [args[0]] + [to_tensor(i) for i in args[1:]]\n else:\n args = [to_tensor(i) for i in args]\n return fn(*args, **kwargs)\n\n return inner\n return _check_tensor_decorator\n\n\n@for_all_methods(check_tensor_decorator())\nclass Calc(ValueRepr):\n value = None\n is_tensor = True\n\n def __add__(self, other):\n # self + other\n return Add(self, other)\n\n def __radd__(self, other):\n # other + self\n return Add(other, self)\n\n def __sub__(self, other):\n # self - other\n return Add(self, -other)\n\n def __rsub__(self, other):\n # other - self\n return Add(other, -self)\n\n def __neg__(self):\n # -self\n return -1 * self\n\n def __mul__(self, other):\n # self * other\n return Mul(self, other)\n\n def __rmul__(self, other):\n # other * self\n return Mul(other, self)\n\n def __truediv__(self, other):\n # self / other\n return Div(self, other)\n\n def __rtruediv__(self, other):\n # other / self\n return Div(other, self)\n\n def __pow__(self, other):\n # self ** other\n return Pow(self, other)\n\n def __rpow__(self, other):\n # other ** self\n return Pow(other, self)\n\n\nclass Tensor(Calc):\n def __init__(self, value, var=True):\n if type(value) == Tensor:\n self.value = value.value\n self.value = np.array(value)\n self.var = var # 변수인가? (기울기 계산 대상인가?)\n\n\nclass Add(Calc, Backward):\n @_graph_debugging_decorator\n def __init__(self, a, b):\n self.a, self.b = a, b\n self.value = a.value + b.value\n\n\nclass Mul(Calc, Backward):\n @_graph_debugging_decorator\n def __init__(self, a, b):\n self.a, self.b = a, b\n self.value = a.value * b.value\n\n\nclass Div(Calc, Backward):\n @_graph_debugging_decorator\n def __init__(self, a, b):\n self.a, self.b = a, b\n self.value = a.value / b.value\n\n\nclass Pow(Calc, Backward):\n @_graph_debugging_decorator\n def __init__(self, a, b):\n self.a, self.b = a, b\n self.value = a.value ** b.value\n" ]
[ [ "numpy.array" ] ]
awsaf49/keras_cv_attention_models
[ "242aaf02fd46f68d57f710b9e805afe96e3067e5" ]
[ "keras_cv_attention_models/resnet_family/resnet_quad.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\nfrom keras_cv_attention_models.download_and_load import reload_model_weights\nfrom keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias, drop_block\n\n\nPRETRAINED_DICT = {\n \"resnet51q\": {\"imagenet\": \"2b54c5e252bd58f37454e6fb273716f7\"},\n}\n\n\ndef quad_block(inputs, filters, groups_div=32, strides=1, conv_shortcut=False, expansion=4, extra_conv=False, drop_rate=0, activation=\"swish\", name=\"\"):\n expanded_filter = filters * expansion\n groups = filters // groups_div if groups_div != 0 else 1\n if conv_shortcut:\n shortcut = conv2d_no_bias(inputs, expanded_filter, 1, strides=strides, name=name + \"shortcut_\")\n shortcut = batchnorm_with_activation(shortcut, activation=None, zero_gamma=False, name=name + \"shortcut_\")\n else:\n shortcut = inputs\n\n if groups != 1: # Edge block\n nn = conv2d_no_bias(inputs, filters, 1, strides=1, padding=\"VALID\", name=name + \"1_\")\n nn = batchnorm_with_activation(nn, activation=activation, zero_gamma=False, name=name + \"1_\")\n else:\n nn = inputs\n\n nn = conv2d_no_bias(nn, filters, 3, strides=strides, padding=\"SAME\", groups=groups, name=name + \"groups_\")\n nn = batchnorm_with_activation(nn, activation=activation, zero_gamma=False, name=name + \"2_\")\n\n if extra_conv:\n nn = conv2d_no_bias(nn, filters, 3, strides=1, padding=\"SAME\", groups=groups, name=name + \"extra_groups_\")\n nn = batchnorm_with_activation(nn, activation=activation, zero_gamma=False, name=name + \"extra_2_\")\n\n nn = conv2d_no_bias(nn, expanded_filter, 1, strides=1, padding=\"VALID\", name=name + \"3_\")\n nn = batchnorm_with_activation(nn, activation=None, zero_gamma=True, name=name + \"3_\")\n\n # print(\">>>> shortcut:\", shortcut.shape, \"nn:\", nn.shape)\n nn = drop_block(nn, drop_rate)\n nn = keras.layers.Add(name=name + \"add\")([shortcut, nn])\n return keras.layers.Activation(activation, name=name + \"out\")(nn)\n\n\ndef quad_stack(inputs, blocks, filters, groups_div, strides=2, expansion=4, extra_conv=False, stack_drop=0, activation=\"swish\", name=\"\"):\n nn = inputs\n stack_drop_s, stack_drop_e = stack_drop if isinstance(stack_drop, (list, tuple)) else [stack_drop, stack_drop]\n for id in range(blocks):\n conv_shortcut = True if id == 0 and (strides != 1 or inputs.shape[-1] != filters * expansion) else False\n cur_strides = strides if id == 0 else 1\n block_name = name + \"block{}_\".format(id + 1)\n block_drop_rate = stack_drop_s + (stack_drop_e - stack_drop_s) * id / blocks\n nn = quad_block(nn, filters, groups_div, cur_strides, conv_shortcut, expansion, extra_conv, block_drop_rate, activation, name=block_name)\n return nn\n\n\ndef quad_stem(inputs, stem_width, activation=\"swish\", stem_act=False, name=\"\"):\n nn = conv2d_no_bias(inputs, stem_width // 8, 3, strides=2, padding=\"same\", name=name + \"1_\")\n if stem_act:\n nn = batchnorm_with_activation(nn, activation=activation, name=name + \"1_\")\n nn = conv2d_no_bias(nn, stem_width // 4, 3, strides=1, padding=\"same\", name=name + \"2_\")\n if stem_act:\n nn = batchnorm_with_activation(nn, activation=activation, name=name + \"2_\")\n nn = conv2d_no_bias(nn, stem_width // 2, 3, strides=1, padding=\"same\", name=name + \"3_\")\n nn = batchnorm_with_activation(nn, activation=activation, name=name + \"3_\")\n nn = conv2d_no_bias(nn, stem_width, 3, strides=2, padding=\"same\", name=name + \"4_\")\n return nn\n\n\ndef ResNetQ(\n num_blocks,\n out_channels=[64, 
128, 384, 384],\n stem_width=128,\n stem_act=False,\n expansion=4,\n groups_div=32,\n extra_conv=False,\n num_features=2048,\n strides=2,\n stem_downsample=False,\n input_shape=(224, 224, 3),\n num_classes=1000,\n activation=\"swish\",\n drop_connect_rate=0,\n classifier_activation=\"softmax\",\n pretrained=\"imagenet\",\n model_name=\"resnetq\",\n kwargs=None,\n):\n inputs = keras.layers.Input(shape=input_shape)\n nn = quad_stem(inputs, stem_width, activation=activation, stem_act=stem_act, name=\"stem_\")\n nn = batchnorm_with_activation(nn, activation=activation, name=\"stem_\")\n if stem_downsample:\n nn = keras.layers.ZeroPadding2D(padding=1, name=\"stem_pool_pad\")(nn)\n nn = keras.layers.MaxPooling2D(pool_size=3, strides=2, name=\"stem_pool\")(nn)\n\n total_blocks = sum(num_blocks)\n global_block_id = 0\n drop_connect_s, drop_connect_e = 0, drop_connect_rate\n strides = strides if isinstance(strides, (list, tuple)) else [1, 2, 2, strides]\n for id, (num_block, out_channel, stride) in enumerate(zip(num_blocks, out_channels, strides)):\n name = \"stack{}_\".format(id + 1)\n stack_drop_s = drop_connect_rate * global_block_id / total_blocks\n stack_drop_e = drop_connect_rate * (global_block_id + num_block) / total_blocks\n stack_drop = (stack_drop_s, stack_drop_e)\n cur_expansion = expansion[id] if isinstance(expansion, (list, tuple)) else expansion\n cur_extra_conv = extra_conv[id] if isinstance(extra_conv, (list, tuple)) else extra_conv\n cur_groups_div = groups_div[id] if isinstance(groups_div, (list, tuple)) else groups_div\n nn = quad_stack(nn, num_block, out_channel, cur_groups_div, stride, cur_expansion, cur_extra_conv, stack_drop, activation, name=name)\n global_block_id += num_block\n\n if num_features != 0: # efficientnet like\n nn = conv2d_no_bias(nn, num_features, 1, strides=1, name=\"features_\")\n nn = batchnorm_with_activation(nn, activation=activation, name=\"features_\")\n\n if num_classes > 0:\n nn = keras.layers.GlobalAveragePooling2D(name=\"avg_pool\")(nn)\n nn = keras.layers.Dense(num_classes, dtype=\"float32\", activation=classifier_activation, name=\"predictions\")(nn)\n\n model = keras.models.Model(inputs, nn, name=model_name)\n reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release=\"resnet_family\", input_shape=input_shape, pretrained=pretrained)\n return model\n\n\ndef ResNet51Q(input_shape=(224, 224, 3), num_classes=1000, activation=\"swish\", classifier_activation=\"softmax\", pretrained=\"imagenet\", **kwargs):\n num_blocks = [2, 4, 6, 4]\n out_channels = [64, 128, 384, 384 * 4]\n stem_width = 128\n stem_act = False\n expansion = [4, 4, 4, 1]\n groups_div = [32, 32, 32, 1]\n extra_conv = False\n num_features = 2048\n return ResNetQ(**locals(), model_name=\"resnet51q\", **kwargs)\n\n\ndef ResNet61Q(input_shape=(224, 224, 3), num_classes=1000, activation=\"swish\", classifier_activation=\"softmax\", pretrained=\"imagenet\", **kwargs):\n num_blocks = [1, 4, 6, 4]\n out_channels = [256, 128, 384, 384 * 4]\n stem_width = 128\n stem_act = True\n expansion = [1, 4, 4, 1]\n groups_div = [0, 32, 32, 1]\n extra_conv = [False, True, True, True]\n num_features = 2048\n return ResNetQ(**locals(), model_name=\"resnet61q\", **kwargs)\n" ]
[ [ "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Input" ] ]
gvanhorn38/active_neurofinder
[ "e10bd893ff3224925b3cf2f4329959c6c19714d8" ]
[ "movie.py" ]
[ "\"\"\"\nCreate a video out of the images\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nfrom scipy import interpolate\n\nfrom glob import glob\nfrom util import load_images, load_regions\nimport os\nfrom scipy.misc import imread\nfrom PIL import Image\n\ndef create_movie(dataset_path, output_path, outline=False, fps=30, dpi=100):\n \"\"\"\n Creates a mp4 file.\n \"\"\"\n \n files = sorted(glob(os.path.join(dataset_path, 'images/*.tiff')))\n images = [Image.open(files[0])]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n if outline: \n regions = load_regions(dataset_path)\n boundaries = regions.mask(dims=images[0].shape, stroke='red', fill=None, base=np.zeros(images[0].shape))\n boundaries = (boundaries[:,:,0] > 0) + 0\n \n mx = np.max(images)\n mn = np.min(images)\n \n f = interpolate.interp1d([mn, mx], [0, 255])\n \n mod_images = []\n for image in images:\n g_i_r = f(image).astype(np.uint8)\n g_i_r[boundaries > 0] = 255\n g_i_g = f(image).astype(np.uint8)\n g_i_g[boundaries > 0] = 0\n g_i_b = f(image).astype(np.uint8)\n g_i_b[boundaries > 0] = 0\n i = np.dstack([g_i_r, g_i_g, g_i_b])\n mod_images.append(i)\n \n images = np.array(mod_images)\n \n im = ax.imshow(images[0] ,cmap='gray')\n \n fig.set_size_inches([5,5])\n plt.tight_layout()\n\n def update_img(n):\n tmp = Image.open(files[n])\n im.set_data(tmp)\n return im\n\n animator = animation.FuncAnimation(fig,update_img,len(files),interval=fps)\n writer = animation.writers['ffmpeg'](fps=fps)\n animator.save(output_path,writer=writer,dpi=dpi)\n\n \n \n \n \n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.min", "numpy.dstack", "numpy.max", "scipy.interpolate.interp1d", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
cjdjr/cosp_nas
[ "f54243176e8398d79df25d295704f24238f24cc7" ]
[ "search/COSP/search.py" ]
[ "import os\nimport time\nimport glob\nimport numpy as np\nimport pickle\nimport torch\nimport logging\nimport argparse\nimport torch\nimport random\nimport json\nimport pprint as pp\nfrom tqdm import tqdm\nfrom torch.nn import DataParallel\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom tensorboard_logger import Logger as TbLogger\nimport torch.distributed as dist\nimport collections\nimport sys\nsys.setrecursionlimit(10000)\nsys.path.append(\"..\")\n\nimport functools\nprint = functools.partial(print, flush=True)\n\nfrom cosp_nas.utils import torch_load_cpu, get_inner_model, set_random_seed, get_logger, move_to, clip_grad_norms, log_values\n\nfrom .network import AttentionModel, set_decode_type\nfrom .reinforce_baselines import NoBaseline, WarmupBaseline, ExponentialBaseline\n\nfrom .subnet_dataset import SubnetDataset\n\n\n\nclass COSPSearcher(object):\n\n def __init__(self, args, supernet):\n\n self.args = args\n self.logger = get_logger(os.path.join(self.args.save_dir, 'process.log'))\n # supernet\n self.supernet = supernet\n\n def check_tblogger(self):\n self.tb_logger = None\n if not self.args.no_tensorboard:\n self.tb_logger = TbLogger(os.path.join(self.args.save_dir))\n # Pretty print the run args\n pp.pprint(vars(self.args))\n\n def validate(self, model, log=False):\n # Validate\n self.logger.info('Validating...')\n\n # Fix the process of sample in validate \n\n set_random_seed( self.args.eval_seed )\n\n input = SubnetDataset(num_samples=1,test=True)[0][None,:,:].expand(self.args.val_sample, self.args.n_layer * self.args.n_op,2).to(self.args.device)\n # print(\"val dataset input : \",input[0])\n with torch.no_grad():\n model.eval()\n set_decode_type(model, \"sampling\")\n cost_1, cost_5, _ , pi = model(input, return_pi=True)\n cost_1 = cost_1[:,None]\n # cost_1_min = cost_1.min()\n pi = pi.float()\n info = torch.cat((cost_1,pi),dim=1)\n cost_1_mean = cost_1.mean()\n cost_5_min = cost_5.min()\n cost_5_mean = cost_5.mean()\n\n\n cost_1_min_id = torch.argmin(info[:,0]).item()\n cost_1_min = info[cost_1_min_id][0].item()\n cand = tuple([x%4 for x in map(int,info[cost_1_min_id][1:].cpu().numpy())])\n\n self.logger.info(\"cost_1_min = {} cost_1_mean = {} cost_5_min={} cost_5_mean={} \\n cand : {}\".format(cost_1_min,cost_1_mean,cost_5_min,cost_5_mean,cand))\n\n # cannot make the seed same in all epoch\n self.args.seed += 1 \n set_random_seed( self.args.seed )\n\n return cost_1_min,cost_1_mean,cost_5_min,cost_5_mean\n \n def train_batch(\n self,\n model,\n optimizer,\n baseline,\n epoch,\n batch_id,\n step,\n batch,\n ):\n\n # x, bl_val = baseline.unwrap_batch(batch)\n\n x = batch\n\n x = move_to(x, self.args.device)\n\n # bl_val = move_to(bl_val, self.args.device) if bl_val is not None else None\n # if batch_id == 0:\n # print(args.device,\" : \",batch_id,\" : \",x[0])\n # return \n # Evaluate model, get costs and log probabilities\n\n cost_1, cost_5, log_likelihood= model(x)\n\n\n # global info\n # for i in range(cost_1.size(0)):\n # info[pi[i].item()]={'err':(cost_1[i].item(),cost_5[i].item())}\n\n # Evaluate baseline, get baseline loss if any (only for critic)\n # bl_val, bl_loss = baseline.eval(x, cost_1) if bl_val is None else (bl_val, 0)\n\n # Calculate loss\n reinforce_loss = (cost_1 * log_likelihood).mean()\n loss = reinforce_loss \n\n # Perform backward pass and optimization step\n optimizer.zero_grad()\n loss.backward()\n # Clip gradient norms and get (clipped) gradient norms for logging\n grad_norms = 
clip_grad_norms(optimizer.param_groups, self.args.max_grad_norm)\n optimizer.step()\n\n # gather \n\n cost_1 = cost_1.mean()\n log_likelihood = log_likelihood.mean()\n reinforce_loss = reinforce_loss\n # bl_loss = gather_mean(bl_loss)\n # Logging\n\n if step % int(self.args.log_step) == 0:\n log_values(cost_1, grad_norms, epoch, batch_id, step,\n log_likelihood, reinforce_loss, None, self.tb_logger, self.args)\n self.logger.info(\"Epoch = {} , Step = {}\".format(epoch,step))\n\n def train_epoch(self, model, optimizer, baseline, lr_scheduler, epoch):\n\n self.logger.info(\"Train!\")\n self.logger.info(\"Start train epoch {}, lr={} for run {} , seed={}\".format(epoch, optimizer.param_groups[0]['lr'], self.args.run_name, self.args.seed))\n\n step = epoch * (self.args.epoch_size // self.args.batch_size)\n\n start_time = time.time()\n\n if not self.args.no_tensorboard:\n self.tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)\n\n # Generate new training data for each epoch\n training_dataset = baseline.wrap_dataset(SubnetDataset(num_samples=self.args.epoch_size))\n training_dataloader = DataLoader(training_dataset, batch_size=self.args.batch_size, num_workers=0)\n\n # Put model in train mode!\n model.train()\n set_decode_type(model, \"sampling\")\n\n for batch_id, batch in enumerate(tqdm(training_dataloader, disable=self.args.no_progress_bar)):\n\n self.train_batch(\n model,\n optimizer,\n baseline,\n epoch,\n batch_id,\n step,\n batch\n )\n step += 1\n\n epoch_duration = time.time() - start_time\n\n # print(epoch_duration.shape)\n\n self.logger.info(\"Finished epoch {}, took {} s\".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))\n\n self.logger.info('Saving model and state...')\n torch.save(\n {\n 'model': get_inner_model(model).state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'rng_state': torch.get_rng_state(),\n 'cuda_rng_state': torch.cuda.get_rng_state_all(),\n 'baseline': baseline.state_dict(),\n },\n os.path.join(self.args.save_dir, 'epoch-{}.pt'.format(epoch))\n )\n\n self.args.eval_model = os.path.join(self.args.save_dir, 'epoch-{}.pt'.format(epoch))\n\n cost_1_min,cost_1_mean,cost_5_min,cost_5_mean = self.validate(model, True)\n\n if not self.args.no_tensorboard:\n self.tb_logger.log_value('cost_1_min', cost_1_min, step)\n self.tb_logger.log_value('cost_1_mean', cost_1_mean, step)\n self.tb_logger.log_value('cost_5_min', cost_5_min, step)\n self.tb_logger.log_value('cost_5_mean', cost_5_mean, step)\n\n baseline.epoch_callback(model, epoch)\n\n # lr_scheduler should be called at end of epoch\n lr_scheduler.step()\n\n def search(self):\n\n # print(\"search\")\n # from IPython import embed\n # embed()\n self.check_tblogger()\n\n with open(os.path.join(self.args.save_dir, \"args.json\"), 'w') as f:\n json.dump(vars(self.args), f, indent=True)\n \n # Load data from load_path\n load_data = {}\n assert self.args.load_path is None or self.args.resume is None, \"Only one of load path and resume can be given\"\n load_path = self.args.load_path if self.args.load_path is not None else self.args.resume\n if load_path is not None:\n print(' [*] Loading data from {}'.format(load_path))\n load_data = torch_load_cpu(load_path)\n \n # Set the random seed\n set_random_seed(self.args.seed)\n\n # Set the device \n if torch.cuda.is_available():\n self.args.device = torch.device('cuda')\n else:\n self.args.device = torch.device('cpu')\n\n model = AttentionModel(\n self.args.embedding_dim,\n self.args.hidden_dim,\n self.args.feed_forward_hidden,\n 
self.supernet,\n n_encode_layers=self.args.n_encode_layers,\n mask_inner=True,\n mask_logits=True,\n normalization=self.args.normalization,\n tanh_clipping=self.args.tanh_clipping,\n ).to(self.args.device)\n model = torch.nn.DataParallel(model).cuda()\n\n model_ = get_inner_model(model)\n model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})\n\n # Initialize baseline\n if self.args.baseline == 'exponential':\n baseline = ExponentialBaseline(self.args.exp_beta)\n else:\n assert self.args.baseline is None, \"Unknown baseline: {}\".format(self.args.baseline)\n baseline = NoBaseline()\n\n if self.args.bl_warmup_epochs > 0:\n baseline = WarmupBaseline(baseline, self.args.bl_warmup_epochs, warmup_exp_beta=self.args.exp_beta)\n\n # Load baseline from data, make sure script is called with same type of baseline\n if 'baseline' in load_data:\n baseline.load_state_dict(load_data['baseline'])\n \n # Initialize optimizer\n optimizer = optim.Adam(\n [{'params': model.parameters(), 'lr': self.args.lr_model}]\n + (\n [{'params': baseline.get_learnable_parameters(), 'lr': self.args.lr_critic}]\n if len(baseline.get_learnable_parameters()) > 0\n else []\n )\n )\n\n # Load optimizer state\n if 'optimizer' in load_data:\n optimizer.load_state_dict(load_data['optimizer'])\n for state in optimizer.state.values():\n for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n if torch.is_tensor(v):\n state[k] = v.to(self.args.device)\n\n # Initialize learning rate scheduler, decay by lr_decay once per epoch!\n lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: self.args.lr_decay ** epoch)\n\n if self.args.resume:\n epoch_resume = int(os.path.splitext(os.path.split(self.args.resume)[-1])[0].split(\"-\")[1])\n\n torch.set_rng_state(load_data['rng_state'])\n if self.args.use_cuda:\n torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])\n # Set the random states\n # Dumping of state was done before epoch callback, so do that now (model is loaded)\n baseline.epoch_callback(model, epoch_resume)\n print(\"Resuming after {}\".format(epoch_resume))\n self.args.epoch_start = epoch_resume + 1\n \n print(\"build finish ! \")\n \n\n if self.args.eval_only:\n self.args.eval_model = self.args.load_path\n self.validate(model, True)\n else:\n for epoch in range(self.args.epoch_start, self.args.epoch_start + self.args.n_epochs):\n self.train_epoch(\n model,\n optimizer,\n baseline,\n lr_scheduler,\n epoch,\n )\n\n" ]
[ [ "torch.set_rng_state", "torch.optim.lr_scheduler.LambdaLR", "torch.cat", "torch.cuda.get_rng_state_all", "torch.utils.data.DataLoader", "torch.argmin", "torch.is_tensor", "torch.get_rng_state", "torch.cuda.set_rng_state_all", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.nn.DataParallel" ] ]
awalker88/auto-ts
[ "7935394dbf7ce386f574ccf83f4e2ae748ae2790" ]
[ "auto_bots/AutoTS.py" ]
[ "import warnings\nimport datetime as dt\nfrom functools import reduce\nfrom typing import Union, List, Tuple\n\nimport pandas as pd\nimport numpy as np\nfrom pmdarima import auto_arima\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\n\nfrom tbats import BATS\n\nfrom auto_bots.utils.error_metrics import mase, mse, rmse\nfrom auto_bots.utils import validation as val\nfrom auto_bots.utils.CandidateModel import CandidateModel\n\n\nclass AutoTS:\n \"\"\"\n Automatic modeler that finds the best time-series method to model your data\n :param model_names: Models to consider when fitting. Currently supported models are\n 'auto_arima', 'exponential_smoothing', 'tbats', and 'ensemble'. default is all available models\n :param error_metric: Which error metric to use when ranking models. Currently supported metrics\n are 'mase', 'mse', and 'rmse'. default='mase'\n :param seasonal_period: period of the data's seasonal trend. 3 would mean your data has quarterly\n trends. Supported models can use multiple seasonalities if a list is provided (Non-supported models\n will use the first item in list). None implies no seasonality.\n \"\"\"\n\n def __init__(\n self,\n model_names: Union[Tuple[str], List[str]] = (\n \"auto_arima\",\n \"exponential_smoothing\",\n \"tbats\",\n \"ensemble\",\n ),\n error_metric: str = \"mase\",\n seasonal_period: Union[int, float, List[int], List[float]] = None,\n verbose: int = 0,\n auto_arima_args: dict = None,\n exponential_smoothing_args: dict = None,\n tbats_args: dict = None,\n ):\n self.verbose = verbose\n\n # fix mutable args\n if auto_arima_args is None:\n auto_arima_args = {}\n if exponential_smoothing_args is None:\n exponential_smoothing_args = {}\n if tbats_args is None:\n tbats_args = {}\n\n # input validation\n val.check_models(model_names)\n valid_error_metrics = [\"mase\", \"mse\", \"rmse\"]\n if error_metric.lower() not in valid_error_metrics:\n raise ValueError(f\"Error metric must be one of {valid_error_metrics}\")\n\n self.model_names = [model.lower() for model in model_names]\n self.error_metric = error_metric.lower()\n self.is_seasonal = True if seasonal_period is not None else False\n self.seasonal_period = val.set_seasonal_period(self, seasonal_period)\n self.auto_arima_args = auto_arima_args\n self.exponential_smoothing_args = exponential_smoothing_args\n self.tbats_args = tbats_args\n\n # Set during fitting or by other methods\n self.data = None\n self.series_column_name = None\n self.freq = None\n self.exogenous = None\n self.using_exogenous = False\n self.candidate_models = []\n self.fit_model = None\n self.fit_model_type = None\n self.best_model_error = None\n self.is_fitted = False\n self.prediction_index = None\n\n warnings.filterwarnings(\"ignore\", module=\"statsmodels\")\n\n def fit(\n self,\n data: pd.DataFrame,\n series_column_name: str,\n freq: str = \"infer\",\n exogenous: Union[str, list] = None,\n ) -> None:\n \"\"\"\n Fit model to given training data.\n :param data: pandas dataframe containing series you would like to predict and any exogenous\n variables you'd like to be used. The dataframe's index MUST be a datetime index\n :param series_column_name: name of the column containing the series you would like to predict\n :param freq: frequency of your time series. 
pandas does a pretty good job inferring,\n but you can also specify via one of the options here\n https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects\n :param exogenous: column name or list of column names you would like to be used as exogenous\n regressors. auto_arima is the only model that supports exogenous regressors. The repressor\n columns should not be a constant or a trend\n \"\"\"\n val.check_datetime_index(data)\n self.data = data\n self.series_column_name = series_column_name\n\n if freq == \"infer\":\n self.freq = pd.infer_freq(data.index.to_series())\n else:\n if freq not in list(pd.tseries.frequencies._offset_to_period_map):\n raise ValueError(\n f\"'{freq}' is not a recognized frequency option. \"\n f\"`freq` must be 'infer' or one of the offsets described at this link: \"\n f\"https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases \"\n )\n\n # if user passes a string value (single column), make sure we can always assume exogenous is a list\n if isinstance(exogenous, str):\n exogenous = [exogenous]\n\n if exogenous is not None:\n self.using_exogenous = True\n self.exogenous = exogenous\n\n if \"auto_arima\" in self.model_names:\n self.candidate_models.append(self._fit_auto_arima())\n if self.verbose >= 1:\n print(\n f\"\\tTrained auto_arima model with error {self.candidate_models[-1].error:.4f}\"\n )\n if \"exponential_smoothing\" in self.model_names:\n self.candidate_models.append(self._fit_exponential_smoothing())\n if self.verbose >= 1:\n print(\n f\"\\tTrained exponential_smoothing model with error {self.candidate_models[-1].error:.4f}\"\n )\n if \"tbats\" in self.model_names:\n self.candidate_models.append(self._fit_tbats())\n if self.verbose >= 1:\n print(\n f\"\\tTrained tbats model with error {self.candidate_models[-1].error:.4f}\"\n )\n if \"ensemble\" in self.model_names:\n if self.candidate_models is None:\n raise ValueError(\"No candidate models to ensemble\")\n self.candidate_models.append(self._fit_ensemble())\n if self.verbose >= 1:\n print(\n f\"\\tTrained ensemble model with error {self.candidate_models[-1].error:.4f}\"\n )\n\n self.candidate_models = sorted(self.candidate_models, key=lambda x: x.error)\n self.best_model_error = self.candidate_models[0].error\n self.fit_model = self.candidate_models[0].fit_model\n self.fit_model_type = self.candidate_models[0].model_type\n self.is_fitted = True\n\n def _fit_auto_arima(self) -> CandidateModel:\n \"\"\"\n Fits an ARIMA model using pmdarima's auto_arima\n :return: Currently returns a list where the first item is the error on the test set, the\n second is the arima model, the third is the name of the model, and the fourth is the\n predictions made on the test set\n \"\"\"\n exog = None\n if self.using_exogenous:\n exog = self.data[self.exogenous]\n\n auto_arima_seasonal_period = self.seasonal_period\n if self.seasonal_period is None:\n auto_arima_seasonal_period = (\n 1 # need to use auto_arima default if there's no seasonality defined\n )\n else:\n # since auto_arima supports only 1 seasonality, select the first one as \"main\" seasonality\n auto_arima_seasonal_period = int(auto_arima_seasonal_period[0])\n\n try:\n model = auto_arima(\n self.data[self.series_column_name],\n error_action=\"ignore\",\n supress_warning=True,\n seasonal=self.is_seasonal,\n m=auto_arima_seasonal_period,\n exogenous=exog,\n **self.auto_arima_args,\n )\n\n # occasionally while determining the necessary level of seasonal differencing, we get a weird\n # numpy dot 
product error due to array sizes mismatching. If that happens, we try using\n # Canova-Hansen test for seasonal differencing instead\n except ValueError:\n if self.verbose >= 2:\n print(\n \"\\tSeasonal differencing for auto_arima failed. Trying Canova-Hansen method.\"\n )\n if (\n \"seasonal_test\" in self.auto_arima_args.keys()\n and self.auto_arima_args[\"seasonal_test\"] == \"ocsb\"\n ):\n warnings.warn(\n 'Forcing `seasonal_test=\"ch\"` as \"ocsb\" occasionally causes numpy errors',\n UserWarning,\n )\n self.auto_arima_args[\"seasonal_test\"] = \"ch\"\n model = auto_arima(\n self.data[self.series_column_name],\n error_action=\"ignore\",\n supress_warning=True,\n seasonal=self.is_seasonal,\n m=auto_arima_seasonal_period,\n exogenous=exog,\n **self.auto_arima_args,\n )\n\n test_predictions = pd.DataFrame(\n {\n \"actuals\": self.data[self.series_column_name],\n \"aa_test_predictions\": model.predict_in_sample(exogenous=exog),\n }\n )\n\n test_error = self._error_metric(\n test_predictions, \"aa_test_predictions\", \"actuals\"\n )\n\n return CandidateModel(test_error, model, \"auto_arima\", test_predictions)\n\n def _fit_exponential_smoothing(self) -> CandidateModel:\n \"\"\"\n Fits an exponential smoothing model using statsmodels's ExponentialSmoothing model\n :return: Currently returns a list where the first item is the error on the test set, the\n second is the exponential smoothing model, the third is the name of the model, and the\n fourth is the predictions made on the test set\n \"\"\"\n # if user doesn't specify with kwargs, set these defaults\n if \"trend\" not in self.exponential_smoothing_args.keys():\n self.exponential_smoothing_args[\"trend\"] = \"add\"\n if \"seasonal\" not in self.exponential_smoothing_args.keys():\n self.exponential_smoothing_args[\"seasonal\"] = (\n \"add\" if self.seasonal_period is not None else None\n )\n\n es_seasonal_period = self.seasonal_period\n if self.seasonal_period is not None:\n es_seasonal_period = int(\n es_seasonal_period[0]\n ) # es supports only 1 seasonality\n\n model = ExponentialSmoothing(\n self.data[self.series_column_name],\n seasonal_periods=es_seasonal_period,\n **self.exponential_smoothing_args,\n ).fit()\n\n test_predictions = pd.DataFrame(\n {\n \"actuals\": self.data[self.series_column_name],\n \"es_test_predictions\": model.predict(\n self.data.index[0], self.data.index[-1]\n ),\n }\n )\n\n error = self._error_metric(test_predictions, \"es_test_predictions\", \"actuals\")\n\n return CandidateModel(error, model, \"exponential_smoothing\", test_predictions)\n\n def _fit_tbats(self) -> CandidateModel:\n \"\"\"\n Fits a BATS model using tbats's BATS model\n :return: Currently returns a list where the first item is the error on the test set, the\n second is the BATS model, the third is the name of the model, and the\n fourth is the predictions made on the test set\n \"\"\"\n tbats_seasonal_periods = self.seasonal_period\n if self.seasonal_period is not None:\n tbats_seasonal_periods = self.seasonal_period\n\n # if user doesn't specify with kwargs, set these defaults\n if \"n_jobs\" not in self.tbats_args.keys():\n self.tbats_args[\"n_jobs\"] = 1\n if \"use_arma_errors\" not in self.tbats_args.keys():\n self.tbats_args[\"use_arma_errors\"] = False # helps speed up modeling a bit\n\n model = BATS(\n seasonal_periods=tbats_seasonal_periods,\n use_box_cox=False,\n **self.tbats_args,\n )\n fit_model = model.fit(self.data[self.series_column_name])\n\n test_predictions = pd.DataFrame(\n {\n \"actuals\": 
self.data[self.series_column_name],\n \"tb_test_predictions\": fit_model.y_hat,\n }\n )\n error = self._error_metric(test_predictions, \"tb_test_predictions\", \"actuals\")\n\n return CandidateModel(error, fit_model, \"tbats\", test_predictions)\n\n def _fit_ensemble(self) -> CandidateModel:\n \"\"\"\n Fits a model that is the ensemble of all other models specified during auto_bots's initialization\n :return: Currently returns a list where the first item is the error on the test set, the\n second is the exponential smoothing model, the third is the name of the model, and the\n fourth is the predictions made on the test set\n \"\"\"\n model_predictions = [\n candidate.predictions for candidate in self.candidate_models\n ]\n all_predictions = reduce(\n lambda left, right: pd.merge(\n left,\n right.drop(\"actuals\", axis=\"columns\"),\n left_index=True,\n right_index=True,\n ),\n model_predictions,\n )\n predictions_columns = [\n col for col in all_predictions.columns if str(col).endswith(\"predictions\")\n ]\n all_predictions[\"en_test_predictions\"] = all_predictions[\n predictions_columns\n ].mean(axis=\"columns\")\n\n error = self._error_metric(all_predictions, \"en_test_predictions\", \"actuals\")\n\n return CandidateModel(\n error, None, \"ensemble\", all_predictions[[\"actuals\", \"en_test_predictions\"]]\n )\n\n def _error_metric(\n self, data: pd.DataFrame, predictions_column: str, actuals_column: str\n ) -> float:\n \"\"\"\n Computes error using the error metric specified during initialization\n :param data: pandas dataframe containing predictions and actuals\n :param predictions_column: name of the predictions column\n :param actuals_column: name of the actuals column\n :return: error for given data\n \"\"\"\n if self.error_metric == \"mase\":\n return mase(data, predictions_column, actuals_column)\n if self.error_metric == \"mse\":\n return mse(data, predictions_column, actuals_column)\n if self.error_metric == \"rmse\":\n return rmse(data, predictions_column, actuals_column)\n\n def _predict_auto_arima(\n self,\n start_date: dt.datetime,\n end_date: dt.datetime,\n last_data_date: dt.datetime,\n exogenous: pd.DataFrame = None,\n ) -> pd.Series:\n \"\"\"Uses a fit ARIMA model to predict between the given dates\"\"\"\n # start date and end date are both in-sample\n if end_date <= self.data.index[-1]:\n preds = self.fit_model.predict_in_sample(\n start=self.data.index.get_loc(start_date),\n end=self.data.index.get_loc(end_date),\n exogenous=exogenous,\n )\n\n # start date is in-sample but end date is not\n elif start_date < self.data.index[-1] < end_date:\n num_extra_periods = (\n len(pd.date_range(start=last_data_date, end=end_date, freq=self.freq))\n - 1\n )\n\n in_sample_exog, out_of_sample_exog = None, None\n if self.using_exogenous:\n in_sample_exog = exogenous.iloc[\n exogenous.index.get_loc(start_date) : exogenous.index.get_loc(\n last_data_date\n )\n + 1\n ]\n out_of_sample_exog = exogenous.iloc[\n exogenous.index.get_loc(last_data_date) + 1 :\n ]\n\n # get all in sample predictions and stitch them together with out of sample predictions\n in_sample_preds = self.fit_model.predict_in_sample(\n start=self.data.index.get_loc(start_date), exogenous=in_sample_exog\n )\n out_of_sample_preds = self.fit_model.predict(\n num_extra_periods, exogenous=out_of_sample_exog\n )\n preds = np.concatenate([in_sample_preds, out_of_sample_preds])\n\n # only possible scenario at this point is start date is 1 period past last data date\n else:\n periods_to_predict = len(\n 
pd.date_range(start=start_date, end=end_date, freq=self.freq)\n )\n preds = self.fit_model.predict(periods_to_predict, exogenous=exogenous)\n\n return pd.Series(\n preds, index=pd.date_range(start_date, end_date, freq=self.freq)\n )\n\n def _predict_exponential_smoothing(\n self, start_date: dt.datetime, end_date: dt.datetime\n ) -> pd.Series:\n \"\"\"Uses a fit exponential smoothing model to predict between the given dates\"\"\"\n return self.fit_model.predict(start=start_date, end=end_date)\n\n def _predict_tbats(\n self,\n start_date: dt.datetime,\n end_date: dt.datetime,\n last_data_date: dt.datetime,\n ) -> pd.Series:\n \"\"\"Uses a fit BATS model to predict between the given dates\"\"\"\n in_sample_preds = pd.Series(\n self.fit_model.y_hat,\n index=pd.date_range(\n start=self.data.index[0], end=self.data.index[-1], freq=self.freq\n ),\n )\n\n # start date and end date are both in-sample\n if end_date <= in_sample_preds.index[-1]:\n preds = in_sample_preds.loc[\n self.prediction_index[0] : self.prediction_index[-1]\n ]\n\n # start date is in-sample but end date is not\n elif start_date < self.data.index[-1] < end_date:\n num_extra_periods = (\n len(pd.date_range(start=last_data_date, end=end_date, freq=self.freq))\n - 1\n )\n # get all in sample predictions and stitch them together with out of sample predictions\n in_sample_portion = in_sample_preds.loc[start_date:]\n out_of_sample_portion = self.fit_model.forecast(num_extra_periods)\n preds = np.concatenate([in_sample_portion, out_of_sample_portion])\n\n # only possible scenario at this point is start date is 1 period past last data date\n else:\n preds = self.fit_model.forecast(len(self.prediction_index))\n\n return pd.Series(\n preds, index=pd.date_range(start=start_date, end=end_date, freq=self.freq)\n )\n\n def _predict_ensemble(\n self,\n start_date: dt.datetime,\n end_date: dt.datetime,\n last_data_date: dt.datetime,\n exogenous: pd.DataFrame,\n ) -> pd.Series:\n \"\"\"Uses all other fit models to predict between the given dates and averages them\"\"\"\n ensemble_model_predictions = []\n\n if \"auto_arima\" in self.model_names:\n # todo: the way this works is kind of janky right now. 
probably want to move away from setting\n # and resetting the fit_model attribute for each candidate model\n for candidate in self.candidate_models:\n if candidate.model_type == \"auto_arima\":\n self.fit_model = candidate.fit_model\n preds = self._predict_auto_arima(\n start_date, end_date, last_data_date, exogenous\n )\n preds = preds.rename(\"auto_arima_predictions\")\n ensemble_model_predictions.append(preds)\n\n if \"exponential_smoothing\" in self.model_names:\n for candidate in self.candidate_models:\n if candidate.model_type == \"exponential_smoothing\":\n self.fit_model = candidate.fit_model\n preds = self._predict_exponential_smoothing(start_date, end_date)\n preds = preds.rename(\"exponential_smoothing_predictions\")\n ensemble_model_predictions.append(preds)\n\n if \"tbats\" in self.model_names:\n for candidate in self.candidate_models:\n if candidate.model_type == \"tbats\":\n self.fit_model = candidate.fit_model\n preds = self._predict_tbats(start_date, end_date, last_data_date)\n preds = preds.rename(\"tbats_predictions\")\n ensemble_model_predictions.append(preds)\n\n all_predictions = reduce(\n lambda left, right: pd.merge(\n left, right, left_index=True, right_index=True\n ),\n ensemble_model_predictions,\n )\n all_predictions[\"en_test_predictions\"] = all_predictions.mean(axis=\"columns\")\n\n self.fit_model = None\n self.fit_model_type = \"ensemble\"\n\n return pd.Series(\n all_predictions[\"en_test_predictions\"].values,\n index=pd.date_range(start=start_date, end=end_date, freq=self.freq),\n )\n\n def predict(\n self,\n start: Union[dt.datetime, str],\n end: Union[dt.datetime, str],\n exogenous: pd.DataFrame = None,\n ) -> pd.Series:\n \"\"\"\n Generates predictions (forecasts) for dates between start and end (inclusive).\n :param start: date/time to begin forecast (inclusive), must be either within the date range\n given during fit or one interval after the last period given during fit\n :param end: date/time to end forecast (inclusive)\n :param exogenous: A dataframe of the exogenous regressor column(s) provided during fit().\n The dataframe should be of equal length to the number of predictions you would like to receive\n :return: A pandas Series of length equal to the number of intervals between star and\n end, where the interval is equal to the frequency given or inferred during fit.\n The series' will have a datetime index\n \"\"\"\n if not self.is_fitted:\n raise AttributeError(\n \"Model can't make predictions without first calling `.fit()`!\"\n )\n\n val.validate_predict_dates(start, end)\n self._set_prediction_index(start, end)\n pred_start = self.prediction_index[0]\n pred_end = self.prediction_index[-1]\n\n last_period = self.data.index[-1]\n # check that start date is before or right after that last date given during training\n latest_valid_start = pd.date_range(\n self.data.index[-1], periods=2, freq=self.data.index.inferred_freq\n )[-1]\n if pred_start > latest_valid_start:\n raise ValueError(\n f\"`start` must be no more than 1 period past the last date of data received\"\n f\" during fit. `start` = {pred_start} is too far ahead of latest valid start \"\n f\"{latest_valid_start}\"\n )\n\n # check that start date comes after first date in training\n if pred_start < self.data.index[0]:\n raise ValueError(\n f\"`start` must be later than the earliest date received during fit, \"\n f\"{self.data.index[0]}. 
Received `start` = {pred_start}\"\n )\n\n # check that, if the user fit models with exogenous regressors, future values are provided\n # if we are predicting any out-of-sample periods\n if self.using_exogenous and last_period < pred_end and exogenous is None:\n raise ValueError(\n \"Exogenous regressor(s) must be provided as a dataframe since they were provided during training\"\n )\n\n if self.using_exogenous and not isinstance(exogenous.index, pd.DatetimeIndex):\n raise ValueError(\n \"The index of your `exogenous` must be a series of datetimes\"\n )\n\n # auto_arima requires a dataframe for the exogenous argument. If user provides a series, go\n # ahead and make it a dataframe, just to be nice :)\n if isinstance(exogenous, pd.Series):\n exogenous = pd.DataFrame(exogenous)\n\n # limit exogenous to dates that are specified by start and end date\n if self.using_exogenous:\n exogenous = exogenous[exogenous.index.isin(list(self.prediction_index))]\n\n # check that, if the user fit models with exogenous regressors, exogenous contains all necessary dates\n if (\n self.using_exogenous\n and self.prediction_index.tolist() != exogenous.index.tolist()\n ):\n raise ValueError(\n f\"Exogenous regressor(s) must contain all dates in your prediction interval. The following dates are missing: {[d for d in self.prediction_index.tolist() if d not in exogenous.index.tolist()]}\"\n )\n\n if self.fit_model_type == \"auto_arima\":\n return self._predict_auto_arima(\n pred_start, pred_end, last_period, exogenous\n )\n\n if self.fit_model_type == \"exponential_smoothing\":\n return self._predict_exponential_smoothing(pred_start, pred_end)\n\n if self.fit_model_type == \"tbats\":\n return self._predict_tbats(pred_start, pred_end, last_period)\n\n if self.fit_model_type == \"ensemble\":\n return self._predict_ensemble(pred_start, pred_end, last_period, exogenous)\n\n def _set_prediction_index(\n self, start: Union[dt.datetime, str], end: Union[dt.datetime, str]\n ):\n self.prediction_index = pd.date_range(start, end, freq=self.freq)\n if start != self.prediction_index[0]:\n warnings.warn(\n f\"Given start {start} since is not valid with frequency {self.freq}. \"\n f\"Using {self.prediction_index[0]} instead\",\n UserWarning,\n )\n if end != self.prediction_index[-1]:\n warnings.warn(\n f\"Given end {end} since is not valid with frequency {self.freq}. \"\n f\"Using {self.prediction_index[-1]} instead\",\n UserWarning,\n )\n" ]
[ [ "numpy.concatenate", "pandas.merge", "pandas.DataFrame", "pandas.date_range" ] ]
terrencetec/kontrol
[ "ba6461784e38d01399efeb7a42911259f9254db0" ]
[ "tests/core/test_spectral.py" ]
[ "\"\"\"Tests for kontrol.core.spectral\n\"\"\"\nimport control\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal\n\nimport kontrol\n\n\nnp.random.seed(123)\n\n# Time axis and sampling frequency\nfs = 128\nt0 = 0\nt_end = 512\nt = np.arange(t0, t_end, 1/fs)\n\n# The coherent signal\nA = 1\nsigma = -.01\ngamma = -0.1\nomega_0 = 10*2*np.pi\nx = A*np.exp((sigma + 1j*omega_0*np.exp(gamma*t)) * t).real\n\n# The sensor dynamics.\nzeta = 1\nomega_n = 1*2*np.pi\nomega_m = 10\ns = control.tf(\"s\")\nH1 = s**2 / (s**2 + 2*zeta*omega_n*s + omega_n**2)\nH2 = H1\nH3 = omega_m / (s+omega_m)\n\n# Signals sensed by the sensors.\n_, x1 = control.forced_response(sys=H1, T=t, U=x)\n_, x2 = control.forced_response(sys=H2, T=t, U=x)\n_, x3 = control.forced_response(sys=H3, T=t, U=x)\n\n# The noises\nw1 = np.random.normal(loc=0, scale=1, size=len(t))\nw2 = np.random.normal(loc=0, scale=1, size=len(t))\nw3 = np.random.normal(loc=0, scale=1, size=len(t))\na1 = 0.5\na3 = 5\nepsilon_1 = omega_0/100\nepsilon_3 = omega_0/200\nG1 = a1 / (s+epsilon_1)\nG2 = G1\nG3 = a3 / (s+epsilon_3)**2\n_, n1 = control.forced_response(sys=G1, T=t, U=w1)\n_, n2 = control.forced_response(sys=G2, T=t, U=w2)\n_, n3 = control.forced_response(sys=G3, T=t, U=w3)\n\n# The readouts\ny1 = x1 + n1\ny2 = x2 + n2\ny3 = x3 + n3\n\nf, P_x = scipy.signal.welch(x, fs=fs)\nf, P_n1 = scipy.signal.welch(n1, fs=fs)\nf, P_n2 = scipy.signal.welch(n2, fs=fs)\nf, P_n3 = scipy.signal.welch(n3, fs=fs)\nf, P_y1 = scipy.signal.welch(y1, fs=fs)\nf, P_y2 = scipy.signal.welch(y2, fs=fs)\nf, P_y3 = scipy.signal.welch(y3, fs=fs)\n\n\ndef test_two_channel_correlation():\n \"\"\"\n Tests for `kontrol.core.spectral.two_channel_correlation()`.\n \"\"\"\n P_n1_2channel = kontrol.spectral.two_channel_correlation(y1, y2, fs=fs)\n\n # Alternatively, use the PSD and coherence.\n _, coherence_12 = scipy.signal.coherence(y1, y2, fs=fs)\n P_n1_2channel_coh = kontrol.spectral.two_channel_correlation(\n P_y1, P_y2, fs=fs, coherence=coherence_12)\n\n # Alternatively, use the PSD and cross power spectral density.\n _, cpsd_12 = scipy.signal.csd(y1, y2, fs=fs)\n _, cpsd_21 = scipy.signal.csd(y2, y1, fs=fs)\n P_n1_2channel_cpsd = kontrol.spectral.two_channel_correlation(\n P_y1, P_y2, fs=fs, cpsd=cpsd_12)\n\n ts_equal_coh = np.allclose(P_n1_2channel, P_n1_2channel_coh)\n ts_equal_cpsd = np.allclose(P_n1_2channel, P_n1_2channel_cpsd)\n predict_equal_true = np.allclose(\n np.log10(P_n1_2channel), np.log10(P_n1), rtol=0, atol=0.1)\n\n assert np.all([ts_equal_coh, ts_equal_cpsd, predict_equal_true])\n\n\ndef test_three_channel_correlation():\n \"\"\"Tests for `kontrol.core.spectral.three_channel_correlation()`.\n \"\"\"\n P_n1_3channel = kontrol.spectral.three_channel_correlation(\n y1, y2, y3, fs=fs)\n P_n2_3channel = kontrol.spectral.three_channel_correlation(\n y2, y1, y3, fs=fs)\n P_n3_3channel = kontrol.spectral.three_channel_correlation(\n y3, y1, y2, fs=fs)\n\n # Alternatively, use PSD and coherences\n _, coherence_12 = scipy.signal.coherence(y1, y2, fs=fs)\n _, coherence_13 = scipy.signal.coherence(y1, y3, fs=fs)\n _, coherence_21 = scipy.signal.coherence(y2, y1, fs=fs)\n _, coherence_23 = scipy.signal.coherence(y2, y3, fs=fs)\n _, coherence_31 = scipy.signal.coherence(y3, y1, fs=fs)\n _, coherence_32 = scipy.signal.coherence(y3, y2, fs=fs)\n\n n1_kwargs = {\n \"coherence_13\": coherence_13,\n \"coherence_23\": coherence_23,\n \"coherence_21\": coherence_21,\n }\n # Notice the changes.\n n2_kwargs = {\n \"coherence_13\": coherence_23,\n \"coherence_23\": 
coherence_13,\n \"coherence_21\": coherence_12,\n }\n n3_kwargs = {\n \"coherence_13\": coherence_32,\n \"coherence_23\": coherence_12,\n \"coherence_21\": coherence_13,\n }\n\n P_n1_3channel_coh = kontrol.spectral.three_channel_correlation(\n P_y1, **n1_kwargs)\n P_n2_3channel_coh = kontrol.spectral.three_channel_correlation(\n P_y2, **n2_kwargs)\n P_n3_3channel_coh = kontrol.spectral.three_channel_correlation(\n P_y3, **n3_kwargs)\n\n\n # And Alternatively, use PSD and cross power spectral densities.\n _, cpsd_12 = scipy.signal.csd(y1, y2, fs=fs)\n _, cpsd_13 = scipy.signal.csd(y1, y3, fs=fs)\n _, cpsd_21 = scipy.signal.csd(y2, y1, fs=fs)\n _, cpsd_23 = scipy.signal.csd(y2, y3, fs=fs)\n _, cpsd_31 = scipy.signal.csd(y3, y1, fs=fs)\n _, cpsd_32 = scipy.signal.csd(y3, y2, fs=fs)\n\n n1_kwargs = {\n \"cpsd_13\": cpsd_13,\n \"cpsd_23\": cpsd_23,\n \"cpsd_21\": cpsd_21\n }\n n2_kwargs = {\n \"cpsd_13\": cpsd_23,\n \"cpsd_23\": cpsd_13,\n \"cpsd_21\": cpsd_12,\n }\n n3_kwargs = {\n \"cpsd_13\": cpsd_32,\n \"cpsd_23\": cpsd_12,\n \"cpsd_21\": cpsd_13\n }\n\n P_n1_3channel_cpsd = kontrol.spectral.three_channel_correlation(\n P_y1, **n1_kwargs)\n P_n2_3channel_cpsd = kontrol.spectral.three_channel_correlation(\n P_y2, **n2_kwargs)\n P_n3_3channel_cpsd = kontrol.spectral.three_channel_correlation(\n P_y3, **n3_kwargs)\n\n ts_equal_coh = np.allclose(P_n3_3channel, P_n3_3channel_coh)\n ts_equal_cpsd = np.allclose(P_n3_3channel, P_n3_3channel_coh)\n predict_equal_true = np.allclose(P_n3_3channel, P_n3, rtol=0, atol=0.5)\n\n assert np.all([ts_equal_coh, ts_equal_cpsd, predict_equal_true])\n" ]
[ [ "numpy.allclose", "numpy.random.seed", "numpy.arange", "numpy.all", "numpy.log10", "numpy.exp" ] ]
moj-analytical-services/data_engineering_utils
[ "fb05b48553184f3a9e38d9d20ef14e4009335b4a" ]
[ "dataengineeringutils/pd_metadata_conformance.py" ]
[ "import pkg_resources\nimport json\nimport pandas as pd\nimport numpy as np\n\ndef _remove_paritions_from_table_metadata(table_metadata):\n\n if \"partitions\" in table_metadata:\n table_metadata[\"columns\"] = [c for c in table_metadata[\"columns\"] if c[\"name\"] not in table_metadata[\"partitions\"]]\n table_metadata[\"partitions\"] = []\n\n return table_metadata\n\n\ndef _get_np_datatype_from_metadata(col_name, table_metadata):\n \"\"\"\n Lookup the datatype from the metadata, and our conversion table\n \"\"\"\n\n columns_metadata = table_metadata[\"columns\"]\n with pkg_resources.resource_stream(__name__, \"data/data_type_conversion.csv\") as io:\n type_conversion = pd.read_csv(io)\n type_conversion = type_conversion.set_index(\"metadata\")\n type_conversion_dict = type_conversion.to_dict(orient=\"index\")\n\n col = None\n for c in columns_metadata:\n if c[\"name\"] == col_name:\n col = c\n\n if col:\n agnostic_type = col[\"type\"]\n numpy_type = type_conversion_dict[agnostic_type][\"pandas\"]\n return np.typeDict[numpy_type]\n else:\n return None\n\ndef _pd_dtype_dict_from_metadata(table_metadata, ignore_partitions=False):\n \"\"\"\n Convert the table metadata to the dtype dict that needs to be\n passed to the dtype argument of pd.read_csv\n \"\"\"\n\n with pkg_resources.resource_stream(__name__, \"data/data_type_conversion.csv\") as io:\n type_conversion = pd.read_csv(io)\n\n type_conversion = type_conversion.set_index(\"metadata\")\n\n type_conversion_dict = type_conversion.to_dict(orient=\"index\")\n\n table_metadata_columns = table_metadata[\"columns\"]\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n dtype = {}\n parse_dates = []\n\n for c in table_metadata_columns:\n colname = c[\"name\"]\n coltype = c[\"type\"]\n coltype = type_conversion_dict[coltype]['pandas']\n dtype[colname] = np.typeDict[coltype]\n\n return dtype\n\ndef _pd_date_parse_list_from_metadatadata(table_metadata):\n \"\"\"\n Get list of columns to pass to the pandas.to_csv date_parse argument from table metadata\n \"\"\"\n\n table_metadata = table_metadata[\"columns\"]\n\n parse_dates = []\n\n for c in table_metadata:\n colname = c[\"name\"]\n coltype = c[\"type\"]\n\n if coltype in [\"date\", \"datetime\"]:\n parse_dates.append(colname)\n\n return parse_dates\n\ndef pd_read_csv_using_metadata(filepath_or_buffer, table_metadata, ignore_partitions=False, *args, **kwargs):\n \"\"\"\n Use pandas to read a csv imposing the datatypes specified in the table_metadata\n\n Passes through kwargs to pandas.read_csv\n\n If ignore_partitions=True, assume that partitions are not columns in the dataset\n \"\"\"\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n dtype = _pd_dtype_dict_from_metadata(table_metadata, ignore_partitions)\n parse_dates = _pd_date_parse_list_from_metadatadata(table_metadata)\n\n return pd.read_csv(filepath_or_buffer, dtype = dtype, parse_dates = parse_dates, *args, **kwargs)\n\ndef _pd_df_cols_match_metadata_cols(df, table_metadata):\n \"\"\"\n Is the set of columns in the metadata equal to the set of columns in the dataframe?\n\n This check is irrespective of column order, does not check for duplicates.\n \"\"\"\n\n pd_columns = set(df.columns)\n md_columns = set([c[\"name\"] for c in table_metadata[\"columns\"]])\n\n return pd_columns == md_columns\n\ndef _check_pd_df_cols_match_metadata_cols(df, table_metadata):\n\n if not _pd_df_cols_match_metadata_cols(df, table_metadata):\n raise 
ValueError(\"Your pandas dataframe contains different columns to your metadata\")\n\n\ndef _pd_df_cols_match_metadata_cols_ordered(df, table_metadata):\n \"\"\"\n Are the columns in the metadata exactly the same as those in the dataframe\n i.e. same columns in the same order\n \"\"\"\n\n pd_columns = list(df.columns)\n md_columns = [c[\"name\"] for c in table_metadata[\"columns\"]]\n\n return pd_columns == md_columns\n\ndef _check_pd_df_cols_match_metadata_cols_ordered(df, table_metadata):\n\n if not _pd_df_cols_match_metadata_cols(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different columns to your metadata\")\n\n if not _pd_df_cols_match_metadata_cols_ordered(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different columns to your metadata\")\n\n\ndef pd_df_datatypes_match_metadata_data_types(df, table_metadata, ignore_partitions=False):\n \"\"\"\n Do the data types in the pandas dataframe match those in table_metadata\n \"\"\"\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n expected_dtypes = _pd_dtype_dict_from_metadata(table_metadata)\n date_cols = _pd_date_parse_list_from_metadatadata(table_metadata)\n\n for col in date_cols:\n expected_dtypes[col] = np.typeDict[\"datetime64\"]\n\n actual_numpy_types = dict(df.dtypes)\n\n for dt in actual_numpy_types:\n actual_numpy_types[dt] = actual_numpy_types[dt].type\n\n return actual_numpy_types == expected_dtypes\n\ndef _check_pd_df_datatypes_match_metadata_data_types(df, table_metadata):\n\n if not pd_df_datatypes_match_metadata_data_types(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different datatypes to those expected by the metadata\")\n\n\ndef check_pd_df_exactly_conforms_to_metadata(df, table_metadata, ignore_partitions=False):\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n if not _pd_df_cols_match_metadata_cols(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different columns to your metadata\")\n\n if not _pd_df_cols_match_metadata_cols_ordered(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different columns to your metadata\")\n\n if not pd_df_datatypes_match_metadata_data_types(df, table_metadata):\n raise ValueError(\"Your pandas dataframe contains different datatypes to those expected by the metadata\")\n\n\ndef impose_metadata_column_order_on_pd_df(df, table_metadata, create_cols_if_not_exist=False, delete_superflous_colums=True, ignore_partitions=False):\n \"\"\"\n Return a dataframe where the column order conforms to the metadata\n Note: This does not check the types match the metadata\n \"\"\"\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n md_cols = [c[\"name\"] for c in table_metadata[\"columns\"]]\n actual_cols = df.columns\n\n md_cols_set = set(md_cols)\n actual_cols_set = set(actual_cols)\n\n if len(md_cols) != len(md_cols_set):\n raise ValueError(\"You have a duplicated column names in your metadata\")\n\n if len(actual_cols) != len(actual_cols_set):\n raise ValueError(\"You have a duplicated column names in your data\")\n\n # Delete superflous columns if option set\n superflous_cols = actual_cols_set - md_cols_set\n\n if len(superflous_cols) > 0 and not delete_superflous_colums:\n raise ValueError(f\"You chose delete_superflous_colums = False, but the following superflous columns were found: {superflous_cols}\")\n else:\n for 
c in superflous_cols:\n del df[c]\n\n # Create columns if not in data and option is set\n missing_cols = md_cols_set - actual_cols_set\n\n if len(missing_cols) > 0 and not create_cols_if_not_exist:\n raise ValueError(f\"You create_cols_if_not_exist = False, but the following columns are missing from your data {missing_cols}\")\n else:\n for c in missing_cols:\n np_type = _get_np_datatype_from_metadata(c, table_metadata)\n df[c] = pd.Series(dtype=np_type)\n\n return df[md_cols]\n\n\ndef impose_metadata_data_types_on_pd_df(df, table_metadata, errors='raise', ignore_partitions=False):\n \"\"\"\n Impost correct data type on all columns in metadata.\n Doesn't modify columns not in metadata\n\n Allows you to pass arguments through to the astype e.g. to errors = 'ignore'\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.astype.html\n \"\"\"\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n df_cols_set = set(df.columns)\n\n metadata_date_cols_set = set(_pd_date_parse_list_from_metadatadata(table_metadata))\n metadata_cols_set = set([c[\"name\"] for c in table_metadata[\"columns\"]])\n\n # Cols that may need conversion without date cols\n try_convert_nodate = df_cols_set.intersection(metadata_cols_set) - metadata_date_cols_set\n try_convert_date = df_cols_set.intersection(metadata_date_cols_set)\n\n for col in try_convert_nodate:\n expected_type = _get_np_datatype_from_metadata(col, table_metadata)\n actual_type = df[col].dtype.type\n\n if expected_type != actual_type:\n df[col] = df[col].astype(expected_type, errors=errors)\n\n if expected_type == np.typeDict[\"object\"]:\n df[col] = df[col].astype(str)\n\n for col in try_convert_date:\n expected_type = np.typeDict[\"Datetime64\"]\n actual_type = df[col].dtype.type\n\n if expected_type != actual_type:\n df[col] = pd.to_datetime(df[col], errors=errors)\n\n return df\n\n\ndef impose_exact_conformance_on_pd_df(df, table_metadata, ignore_partitions=False):\n\n if ignore_partitions:\n table_metadata = _remove_paritions_from_table_metadata(table_metadata)\n\n df = impose_metadata_column_order_on_pd_df(df, table_metadata, delete_superflous_colums=True)\n df = impose_metadata_data_types_on_pd_df(df, table_metadata)\n return df\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.Series" ] ]
droidiyann/tensorflow_tensorflow
[ "ed10660cf38306921faaa67ddbc3f369441bcb6d" ]
[ "tensorflow/lite/testing/op_tests/resize_nearest_neighbor.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for resize_nearest_neighbor.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_resize_nearest_neighbor_tests(options):\n \"\"\"Make a set of tests to do resize_nearest_neighbor.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [False],\n \"fully_quantize\": [False],\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [False],\n \"fully_quantize\": [True],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.compat.v1.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.image.resize_nearest_neighbor(\n input_tensor,\n size=parameters[\"size\"],\n align_corners=parameters[\"align_corners\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n parameters[\"dtype\"],\n parameters[\"input_shape\"],\n min_value=-1,\n max_value=1)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n" ]
[ [ "tensorflow.lite.testing.zip_test_utils.make_zip_of_tests", "tensorflow.lite.testing.zip_test_utils.create_tensor_data", "tensorflow.image.resize_nearest_neighbor", "tensorflow.compat.v1.placeholder", "tensorflow.lite.testing.zip_test_utils.register_make_test_function" ] ]
marcelonyc/demos
[ "00451b64816a2ab32d2b4b5307f1dd3c80c2d4e6" ]
[ "image_classification/utils.py" ]
[ "import os\nimport zipfile\nimport json\nfrom tempfile import mktemp\nimport pandas as pd\n\n\ndef open_archive(context, \n target_dir='content',\n archive_url=''):\n \"\"\"Open a file/object archive into a target directory\"\"\"\n \n os.makedirs(target_dir, exist_ok=True)\n context.logger.info('Verified directories')\n \n context.logger.info('Extracting zip')\n zip_ref = zipfile.ZipFile(archive_url, 'r')\n zip_ref.extractall(target_dir)\n zip_ref.close()\n \n context.logger.info(f'extracted archive to {target_dir}')\n context.log_artifact('content', target_path=target_dir)\n\n \nfrom mlrun.artifacts import TableArtifact\n\ndef categories_map_builder(context,\n source_dir,\n df_filename='file_categories_df.csv',\n map_filename='categories_map.json'):\n \"\"\"Read labeled images from a directory and create category map + df\n \n filename format: <category>.NN.jpg\"\"\"\n \n filenames = [file for file in os.listdir(source_dir) if file.endswith('.jpg')]\n categories = []\n \n for filename in filenames:\n category = filename.split('.')[0]\n categories.append(category)\n\n df = pd.DataFrame({\n 'filename': filenames,\n 'category': categories\n })\n df['category'] = df['category'].astype('str')\n \n categories = df.category.unique()\n categories = {i: category for i, category in enumerate(categories)}\n with open(os.path.join(context.out_path, map_filename), 'w') as f:\n f.write(json.dumps(categories))\n \n context.logger.info(categories)\n context.log_artifact('categories_map', src_path=map_filename)\n context.log_artifact(TableArtifact('file_categories', df=df, src_path=df_filename))\n\n" ]
[ [ "pandas.DataFrame" ] ]
AndreiRoibu/basic_numpy
[ "4bcaf2a5b628937f481de9d5b7a7c9061c1eb816" ]
[ "simple_exercises/MNIST_flip.py" ]
[ "'''MNIST flip\n\nAuthor: Andrei-Claudiu Roibu, 2019\n\nThis code has been created to compare the speed difference between a numpy dot product and a slow dot product. This code was written in support of my learning.\n\nThe original code comes from these two sources: \n\n # https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python\n # https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python\n\nDescription:\n\nThis code loads the MNIST data set, found at this link: https://www.kaggle.com/c/digit-recognizer/data \n\nThis code contains a function that flips an image 90 degrees clockwise, using both for loops and numpy functions, comparing their performance.\n\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom MNIST_means import data_loader\n\ndef rotate90(image):\n \"\"\" Rotate the image by 90 degrees\n\n Args:\n image (np.array): Array containing the image information\n \"\"\"\n\n return np.rot90(image, 3)\n\ndef rotate_caller(data_path):\n \"\"\" Rotation caller\n\n This function loads the MNIST data and rotates the images.\n\n Args:\n data_path (str): Path to where the data file is\n \"\"\"\n\n labels, images = data_loader(data_path)\n number_of_images = images.shape[0]\n\n for index in range(number_of_images):\n image = images[index].reshape(28,28)\n image = rotate90(image)\n\n plt.imshow(image, cmap='gray')\n plt.title(\"Label: \"+ str(labels[index]))\n plt.show()\n\n ans = input(\"Continue [y/n]: \")\n if ans and ans[0].lower() == 'n':\n break\n\nif __name__ == \"__main__\":\n data_path = '../../large_files/train.csv'\n rotate_caller(data_path)" ]
[ [ "numpy.rot90", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
stevegolton/toppra
[ "846e2a7f5b87e0e1884b244b07d5fd661edcd9bd" ]
[ "tests/tests/solverwrapper/test_basic_can_linear.py" ]
[ "\"\"\"A test suite for solverwrappers that implement solve methods for\ncanonical linear constraints. Wrapppers considered include:\n'cvxpy', 'qpOASES', \"ecos\", 'hotqpOASES', 'seidel'.\n\n\"\"\"\nimport pytest\nimport numpy as np\nimport numpy.testing as npt\nimport toppra\nimport toppra.constraint as constraint\nimport cvxpy\n\nfrom ..testing_flags import FOUND_CXPY, FOUND_MOSEK, FOUND_OPENRAVEPY\n\ntoppra.setup_logging(level=\"INFO\")\n\n\nclass RandomSecondOrderLinearConstraint(constraint.linear_constraint.LinearConstraint):\n \"\"\"A random Second-Order non-identical constraint.\n\n This contraint is defined solely for testing purposes. It accepts\n a degree of freedom, then generates the coefficient randomly.\n\n \"\"\"\n\n def __init__(self, dof, discretization_scheme=constraint.DiscretizationType.Collocation):\n super(RandomSecondOrderLinearConstraint, self).__init__()\n self.dof = dof\n self.set_discretization_type(discretization_scheme)\n self.identical = False\n self._format_string = \" Random Second-Order constraint (dof={:d}) \\n\".format(\n self.dof)\n\n def compute_constraint_params(self, path, gridpoints):\n N = gridpoints.shape[0] - 1\n a = np.random.randn(N + 1, self.dof)\n b = np.random.randn(N + 1, self.dof)\n c = np.random.randn(N + 1, self.dof)\n F = np.random.randn(N + 1, self.dof, self.dof)\n g = np.random.rand(N + 1, self.dof)\n for i in range(N + 1):\n g[i] += F[i].dot(c[i])\n\n if self.discretization_type == constraint.DiscretizationType.Collocation:\n return a, b, c, F, g, None, None\n elif self.discretization_type == constraint.DiscretizationType.Interpolation:\n return constraint.canlinear_colloc_to_interpolate(\n a, b, c, F, g, None, None, gridpoints, identical=False)\n else:\n raise NotImplementedError(\"Other form of discretization not supported!\")\n\n\[email protected](scope='class', params=['vel_accel'])\ndef basic_init_fixture(request):\n \"\"\" A fixture for testing basic capability of the solver wrapper.\n\n This test case has only two constraints, one velocity constraint\n and one acceleration constraint.\n \"\"\"\n dof = 6\n np.random.seed(1) # Use the same randomly generated way pts\n way_pts = np.random.randn(4, dof) * 0.6\n N = 200\n path = toppra.SplineInterpolator(np.linspace(0, 1, 4), way_pts)\n ss = np.linspace(0, 1, N + 1)\n # Velocity Constraint\n vlim_ = np.random.rand(dof) * 10 + 10\n vlim = np.vstack((-vlim_, vlim_)).T\n pc_vel = constraint.JointVelocityConstraint(vlim)\n # Acceleration Constraints\n alim_ = np.random.rand(dof) * 10 + 100\n alim = np.vstack((-alim_, alim_)).T\n pc_acc = constraint.JointAccelerationConstraint(alim)\n # random Second Order Constraint, only use for testing\n pc_rand = RandomSecondOrderLinearConstraint(dof)\n pcs = [pc_vel, pc_acc, pc_rand]\n yield pcs, path, ss, vlim, alim\n\n print(\"\\n [TearDown] Finish PP Fixture\")\n\n\[email protected](\"solver_name\", ['cvxpy', 'qpOASES', \"ecos\", 'hotqpOASES', 'seidel'])\[email protected](\"i\", [3, 10, 30])\[email protected](\"H\", [np.array([[1.5, 0], [0, 1.0]]), np.zeros((2, 2)), None])\[email protected](\"g\", [np.array([0.2, -1]), np.array([0.5, 1]), np.array([2.0, 1])])\[email protected](\"x_ineq\", [(0.1, 1), (0.2, 0.2), (0.4, 0.3), (np.nan, np.nan)])\[email protected](not FOUND_CXPY, reason=\"This test requires cvxpy to validate results.\")\ndef test_basic_correctness(basic_init_fixture, solver_name, i, H, g, x_ineq):\n \"\"\"Basic test case for solver wrappers.\n\n The input fixture `basic_init_fixture` has two constraints, one\n velocity and one 
acceleration. Hence, in this test, I directly\n formulate an optimization with cvxpy and compare the result with\n the result obtained from the solver wrapper.\n \"\"\"\n constraints, path, path_discretization, vlim, alim = basic_init_fixture\n if solver_name == \"cvxpy\":\n from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper\n solver = cvxpyWrapper(constraints, path, path_discretization)\n elif solver_name == 'qpOASES':\n from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper\n solver = qpOASESSolverWrapper(constraints, path, path_discretization)\n elif solver_name == 'hotqpOASES':\n from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper\n solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)\n elif solver_name == 'ecos' and H is None:\n from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper\n solver = ecosWrapper(constraints, path, path_discretization)\n elif solver_name == 'seidel' and H is None:\n from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper\n solver = seidelWrapper(constraints, path, path_discretization)\n else:\n return True # Skip all other tests\n\n xmin, xmax = x_ineq\n xnext_min = 0\n xnext_max = 1\n\n # Results from solverwrapper to test\n solver.setup_solver()\n result_ = solver.solve_stagewise_optim(i - 2, H, g, xmin, xmax, xnext_min, xnext_max)\n result_ = solver.solve_stagewise_optim(i - 1, H, g, xmin, xmax, xnext_min, xnext_max)\n solverwrapper_result = solver.solve_stagewise_optim(i, H, g, xmin, xmax, xnext_min, xnext_max)\n solver.close_solver()\n\n # Results from cvxpy, used as the actual, desired values\n ux = cvxpy.Variable(2)\n u = ux[0]\n x = ux[1]\n _, _, _, _, _, _, xbound = solver.params[0] # vel constraint\n a, b, c, F, h, ubound, _ = solver.params[1] # accel constraint\n a2, b2, c2, F2, h2, _, _ = solver.params[2] # random constraint\n Di = path_discretization[i + 1] - path_discretization[i]\n v = a[i] * u + b[i] * x + c[i]\n v2 = a2[i] * u + b2[i] * x + c2[i]\n cvxpy_constraints = [\n x <= xbound[i, 1],\n x >= xbound[i, 0],\n F * v <= h,\n F2[i] * v2 <= h2[i],\n x + u * 2 * Di <= xnext_max,\n x + u * 2 * Di >= xnext_min,\n ]\n if not np.isnan(xmin):\n cvxpy_constraints.append(x <= xmax)\n cvxpy_constraints.append(x >= xmin)\n if H is not None:\n objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)\n else:\n objective = cvxpy.Minimize(g * ux)\n problem = cvxpy.Problem(objective, cvxpy_constraints)\n problem.solve(verbose=True) # test with the same solver as cvxpywrapper\n if problem.status == \"optimal\":\n cvxpy_result = np.array(ux.value).flatten()\n solverwrapper_result = np.array(solverwrapper_result).flatten()\n npt.assert_allclose(solverwrapper_result, cvxpy_result, atol=5e-2, rtol=1e-5) # Very bad accuracy? 
why?\n else:\n assert np.all(np.isnan(solverwrapper_result))\n\n\[email protected](\"solver_name\", ['cvxpy', 'qpOASES', 'ecos', 'hotqpOASES', 'seidel'])\ndef test_infeasible_instance(basic_init_fixture, solver_name):\n \"\"\"If the given parameters are infeasible, the solverwrapper should\n terminate gracefully and return a numpy vector [nan, nan].\n \"\"\"\n constraints, path, path_discretization, vlim, alim = basic_init_fixture\n if solver_name == \"cvxpy\":\n from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper\n solver = cvxpyWrapper(constraints, path, path_discretization)\n elif solver_name == 'qpOASES':\n from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper\n solver = qpOASESSolverWrapper(constraints, path, path_discretization)\n elif solver_name == 'hotqpOASES':\n from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper\n solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)\n elif solver_name == 'ecos':\n from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper\n solver = ecosWrapper(constraints, path, path_discretization)\n elif solver_name == 'seidel':\n from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper\n solver = seidelWrapper(constraints, path, path_discretization)\n\n g = np.r_[0, 1].astype(float)\n\n solver.setup_solver()\n result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, np.nan, np.nan)\n assert np.all(np.isnan(result))\n\n result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, 0, -0.5)\n assert np.all(np.isnan(result))\n\n result = solver.solve_stagewise_optim(0, None, g, np.nan, np.nan, 0, -0.5)\n assert np.all(np.isnan(result))\n solver.close_solver()\n" ]
[ [ "numpy.random.seed", "numpy.linspace", "numpy.isnan", "numpy.random.randn", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
Joiner12/TimeVisual
[ "c83c5e88adade5d2a4d75f414e051ab09d4bca8a" ]
[ "python/DrawMap.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\n\n1.机场数据\nhttps://ourairports.com/data/\n\n\"\"\"\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Geo\nfrom pyecharts.globals import ChartType, SymbolType\nimport pandas as pd\nfrom datetime import datetime\n\nextraPosition = {\n \"多哈\": (25.261101, 51.565102),\n \"德黑兰\": (35.7, 51.41666),\n \"缅甸仰光\": (16.77, 96.15),\n \"芝加哥\": (41.85, 87.683-180),\n \"加德满都\": (27.7, 85.3166667),\n \"马德拉斯\": (13.22, 81.33),\n \"马德里\": (40.43, 3.7),\n \"邦达\": (31.13, 97.18),\n \"稻城亚丁\": (29.323056, 100.053333),\n \"新加坡樟宜\": (1.36604863171, 104.003205457),\n \"金边\": (11.3623, 104.9154),\n \"曼谷素万那普\": (13.083, 100.483),\n \"九寨黄龙\": (32.85, 103.68),\n \"孟买\": (18.93, 72.85)\n}\n\n\ndef DrawMap(FlightArrivalFile=\"..//data//FlightArrival.xlsx\",\n FlightDepartureFile=\"..//data//FlightDeparture.xlsx\",**kw):\n if 'dataDay' in kw:\n dataDay = kw['dataDay']\n else:\n dataDay = 'test Day'\n # load data from excle\n ArrialInfo = pd.read_excel(FlightArrivalFile)\n DepartureInfo = pd.read_excel(FlightDepartureFile)\n FromCd = DepartureInfo['目的地'].to_list()\n ToCd = ArrialInfo['始发地'].to_list()\n FromCdD = dict()\n ToCdD = dict()\n for k in FromCd:\n if k in FromCdD.keys():\n FromCdD[k] += 1\n else:\n FromCdD[k] = 1\n\n for j in ToCd:\n if j in ToCdD.keys():\n ToCdD[j] += 1\n else:\n ToCdD[j] = 1\n geoData1 = list()\n geoData2 = list()\n geoData3 = list()\n testGeo = Geo()\n for k1, k2 in zip(FromCdD.keys(), FromCdD.values()):\n # lat[3.86 53.55],lon[73.66 135.05]——中国\n try:\n a = testGeo.get_coordinate(k1)\n if a[1] >= 3.86 and a[1] <= 53.55:\n if a[0] >= 73.66 and a[0] <= 135.05:\n geoData1.append((k1, k2))\n geoData2.append((\"成都双流\", k1))\n except:\n pass\n\n for k1, k2 in zip(ToCdD.keys(), ToCdD.values()):\n try:\n if True:\n b = testGeo.get_coordinate(str(k1))\n if b[1] >= 3.86 and b[1] <= 53.55:\n if b[0] >= 73.66 and b[0] <= 135.05:\n geoData3.append((k1, \"成都双流\"))\n else:\n geoData3.append((k1, \"成都双流\"))\n except:\n pass\n\n geoAd = geoData2+geoData3\n\n c = Geo(init_opts=opts.InitOpts(page_title=\"Map-CDC\",theme=\"light\", bg_color=\"transparent\"))\n # 添加其他位置\n for j in extraPosition.keys():\n c.add_coordinate(j, extraPosition[j][1], extraPosition[j][0])\n\n if False:\n c = (\n Geo(init_opts=opts.InitOpts(page_title=\"Map-CDC\", bg_color=\"#2B3427\"))\n .add_schema(maptype=\"china-cities\")\n .add(\"geo\", geoData1, color=\"blue\", symbol_size=5)\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=\"CDC\"))\n .render(\"..//html//geoTest.html\")\n )\n else:\n c.add_schema(maptype=\"china-cities\")\n c.add(\"Destination\", geoData1, type_=ChartType.EFFECT_SCATTER)\n c.add(\"Arrrial/Departure\", geoAd, type_=ChartType.LINES,\n effect_opts=opts.EffectOpts(symbol=SymbolType.ARROW,\n symbol_size=3,\n color=\"#475EEA\"),\n linestyle_opts=opts.LineStyleOpts(curve=0.3, opacity=0.1, color=\"#5A98D4\"))\n c.set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n c.set_global_opts(title_opts=opts.TitleOpts(title=\"Shuangliu Internatinal Airport-\"+dataDay,\n subtitle=\"Update:\"\n +datetime.now().strftime('%Y-%m-%d')),\n visualmap_opts=opts.VisualMapOpts(is_piecewise=True, max_=50))\n c.render(\"..//html//geoTest.html\")\n print(\"draw map run finished...\\n\")\n return c\n\n\nif __name__ == \"__main__\":\n DrawMap(FlightArrivalFile=\"..//data//FlightArrival-2021-08-13.xlsx\",\n 
FlightDepartureFile=\"..//data//FlightDeparture-2021-08-13.xlsx\")\n # DrawMap()\n" ]
[ [ "pandas.read_excel" ] ]
shivam1212-shivam/OPEN_CV
[ "c2c1765b4dea1a97ae4e6f12eaa49ccf80e08cd3" ]
[ "spaces.py" ]
[ "import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#color spaces --> rgb spaces,grayscal\nimg = cv.imread('photos/dog.jpg')\ncv.imshow('Dog',img)\n\n# plt.imshow(img)\n# plt.show()\n# point to note --> there is conversion of colour spaces in matplotlib\n\n# BGR to grayscale -->\n\ngray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\ncv.imshow('Grayscale',gray)\n\n#BGR to HSV -->\n\nhsv =cv.cvtColor(img,cv.COLOR_BGR2HSV_FULL)\ncv.imshow('HSV',hsv)\n\n#BGR to L*A*B\nlab =cv.cvtColor(img,cv.COLOR_BGR2LAB)\ncv.imshow('Lab',lab)\n\n#BGR to RGB\nrgb = cv.cvtColor(img,cv.COLOR_BGR2RGB)\ncv.imshow('RGB',rgb)\n\nplt.imshow(rgb)\nplt.show()\n\n#cann't convert hsv to BGR\n# HSV to BGR\nhsv_bgr =cv.cvtColor(img,cv.COLOR_HSV2BGR)\ncv.imshow('HSV-> BGR',hsv_bgr)\n\ncv.waitKey(0)" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
pfnet/pynif3d
[ "da3680cce7e8fc4c194f13a1528cddbad9a18ab0" ]
[ "pynif3d/models/idr/sample_network.py" ]
[ "import torch\n\nfrom pynif3d.common.verification import check_true\nfrom pynif3d.log.log_funcs import func_logger\n\n\nclass IDRSampleNetwork(torch.nn.Module):\n \"\"\"\n Computes the differentiable intersection between the viewing ray and the surface.\n Please check equation 3 from the paper and section 3 from the supplementary material\n for more details.\n \"\"\"\n\n @func_logger\n def forward(\n self,\n rays_d,\n rays_o,\n z_vals,\n z_pred,\n mask_surface,\n z_at_surface,\n grad_at_surface,\n ):\n \"\"\"\n Args:\n rays_d (torch.Tensor): Tensor containing the directions of the rays. Its\n shape is ``(n_rays, 3)``.\n rays_o (torch.Tensor): Tensor containing the origins of the rays. Its shape\n is ``(n_rays, 3)``.\n z_vals (torch.Tensor): Tensor containing the distances to the surface. Its\n shape is ``(n_rays,)``.\n z_pred (torch.Tensor): Tensor containing the distances to the surface. Its\n shape is ``(n_rays,)``.\n mask_surface (torch.Tensor): Tensor containing the surface mask. Its shape\n is ``(n_rays, 3)``.\n z_at_surface (torch.Tensor): Tensor containing the SDF values computed at\n the surface points. Its shape is ``(n_points_intersect,)``.\n grad_at_surface (torch.Tensor): Tensor containing the gradient values at the\n intersection points. Its shape is ``(n_points_intersect, 3)``.\n\n Returns:\n torch.Tensor: Tensor containing the differentiable intersection points. Its\n shape is ``(n_points_intersect, 3)``.\n \"\"\"\n\n # The original implementation expects rays_d to be detached. Check whether\n # rays_d.requires_grad is False or not.\n check_true(not rays_d.requires_grad, \"not rays_d.requires_grad\")\n\n rays_o_surface = rays_o[mask_surface]\n rays_d_surface = rays_d[mask_surface]\n z_vals_surface = z_vals[mask_surface]\n z_pred_surface = z_pred[mask_surface]\n\n dot_prod = torch.bmm(grad_at_surface[..., None, :], rays_d_surface[..., None])\n dot_prod = dot_prod.squeeze()\n zs_theta = z_vals_surface - (z_pred_surface - z_at_surface) / dot_prod\n\n points = rays_o_surface + zs_theta[..., None] * rays_d_surface\n return points\n" ]
[ [ "torch.bmm" ] ]
damien-petit/perception
[ "a8d6588f3e123797ccf5a777926b62498b8b882f" ]
[ "tests/kinect2_sensor_bridge.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nInterface to the Ensenso N* Sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport IPython\nimport logging\nimport numpy as np\nimport os\nimport struct\nimport sys\nimport time\nimport signal\n\ntry:\n from cv_bridge import CvBridge, CvBridgeError\n import rospy\n import sensor_msgs.msg\n import sensor_msgs.point_cloud2 as pc2\nexcept ImportError:\n logging.warning(\"Failed to import ROS in Kinect2_sensor.py. Kinect will not be able to be used in bridged mode\")\n \nfrom perception import CameraIntrinsics, CameraSensor, ColorImage, DepthImage, Image, RgbdSensorFactory, Kinect2BridgedQuality\n \ndef main(args):\n # from visualization import Visualizer2D as vis2d\n # from visualization import Visualizer3D as vis3d\n import matplotlib.pyplot as vis2d\n\n # set logging\n logging.getLogger().setLevel(logging.DEBUG)\n rospy.init_node('kinect_reader', anonymous=True)\n\n num_frames = 5\n sensor_cfg = {\"quality\": Kinect2BridgedQuality.HD, \"frame\":'kinect2_rgb_optical_frame'}\n sensor_type = \"bridged_kinect2\"\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)\n sensor.start()\n def handler(signum, frame):\n rospy.loginfo('caught CTRL+C, exiting...') \n if sensor is not None:\n sensor.stop() \n exit(0)\n signal.signal(signal.SIGINT, handler)\n\n total_time = 0\n for i in range(num_frames): \n if i > 0:\n start_time = time.time()\n\n _, depth_im, _ = sensor.frames()\n\n if i > 0:\n total_time += time.time() - start_time\n logging.info('Frame %d' %(i))\n logging.info('Avg FPS: %.5f' %(float(i) / total_time))\n \n depth_im = sensor.median_depth_img(num_img=5)\n color_im, depth_im, _ = sensor.frames()\n\n sensor.stop()\n\n vis2d.figure()\n vis2d.subplot('211')\n vis2d.imshow(depth_im.data)\n vis2d.title('Kinect - depth Raw')\n \n vis2d.subplot('212')\n vis2d.imshow(color_im.data)\n vis2d.title(\"kinect color\")\n vis2d.show()\n \nif __name__ == '__main__':\n main(sys.argv)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
skn123/BASS
[ "a2ac050a09d550d0ad121eb97d21f7e8aff40a84" ]
[ "pytorch_version/Conn_Functions.py" ]
[ "from BASS import *\n\n\nimport numpy as np\nimport cv2\n\ndef Create_LookupTable():\n return np.load('./lookup.npy')\n\ndef binary(num, length=8):\n return int(format(num, '#0{}b'.format(length + 2)))\n\ndef Create_Matrix(padded_matrix,mod):\n if(mod==1):\n pixels = padded_matrix.index_select(0, Global.idx_pixels_1).view(9, -1)\n elif (mod == 2):\n pixels = padded_matrix.index_select(0, Global.idx_pixels_2).view(9, -1)\n elif (mod == 3):\n pixels = padded_matrix.index_select(0, Global.idx_pixels_3).view(9, -1)\n elif (mod == 0):\n pixels = padded_matrix.index_select(0, Global.idx_pixels_4).view(9, -1)\n matrix=torch.add(-pixels.unsqueeze(0),pixels.unsqueeze(1))\n matrix=torch.add((matrix==0),(pixels==(-1)))\n return matrix>0\n\ndef Create_Matrix_Sub(padded_matrix_split,mod):\n pixels_sub=padded_matrix_split.index_select(0,Global.idx_pixels[mod]).view(9,-1)\n matrix_sub=torch.add(-pixels_sub.unsqueeze(0),pixels_sub.unsqueeze(1))\n matrix=((matrix_sub==0))\n return matrix>0\n\ndef Create_number(matrix):\n temp=torch.index_select(matrix,1,Global.shift_index)\n return (torch.sum(temp.byte()<<Global.shift,dim=1))\n\ndef Change_pixel(prev_r_ik,r_ik,index2,r_ik_t5,mod,c_vals,r_ik_tk,range_conn):\n\n padded_matrix = Global.Padding(prev_r_ik)\n padded_matrix = padded_matrix.view(-1)\n number = Create_number(Create_Matrix(padded_matrix, mod))\n number = torch.index_select(number, 0, Global.ne4_idx)\n number2 = torch.index_select(Global.LOOKUP_TABLE, 0, number.view(-1))\n ind = torch.all(number2.view(5, -1), dim=0).float() # ind that we can change\n index2.zero_()\n if(mod==1):\n index2.scatter_(0, Global.idx_1, ind) # 0 -same as before , 1 - Can change (All N pixels)\n if (mod == 2):\n index2.scatter_(0, Global.idx_2, ind) # 0 -same as before , 1 - Can change (All N pixels)\n if (mod == 3):\n index2.scatter_(0, Global.idx_3, ind) # 0 -same as before , 1 - Can change (All N pixels)\n if (mod == 0):\n index2.scatter_(0, Global.idx_4, ind) # 0 -same as before , 1 - Can change (All N pixels)\n # index2 = index2.scatter_(0, Global.idx[:, mod], ind) # 0 -same as before , 1 - Can change (All N pixels)\n max_neigh=torch.argmax(r_ik, dim=1)\n valsNew = torch.add(max_neigh,range_conn)\n valsK = torch.where(index2 == 1, c_vals[valsNew], prev_r_ik.view(-1))\n\n\n return valsK\n\ndef Split(prev_r_ik,split_prev_r_ik,c1,c1_idx,idx_rand,clusters_LR,it_split):\n idxL=idx_rand.long()\n padded_matrixR = Global.Padding0(split_prev_r_ik).reshape(-1)\n padded_matrixL = Global.Padding0(split_prev_r_ik).reshape(-1)\n\n distances=Global.Distances_padded\n distances.zero_()\n centers=torch.index_select(c1,0,idxL)[:,0:2]\n new_centerL= torch.add(centers, -2).floor_().int()\n if(it_split%2==0):\n new_centerL[:, 0]=new_centerL[:,0]+3\n new_centerL[:, 1] = new_centerL[:, 1] - 2\n else:\n new_centerL[:, 1]=new_centerL[:,1]+4\n new_centerL[:, 0] = new_centerL[:, 0] - 2\n idxR = idx_rand.long()\n centersR = torch.index_select(c1, 0, idxR)[:, 0:2]\n new_centerR = torch.add(centersR, +2).ceil_().int()\n\n if (it_split % 2 == 0):\n new_centerR[:, 0] = new_centerR[:, 0] - 3\n new_centerR[:, 1] = new_centerR[:, 1] + 2\n else:\n new_centerR[:, 1] = new_centerR[:, 1] - 4\n new_centerR[:, 0] = new_centerR[:, 0] + 2\n\n new_center_idxR = new_centerR[:, 0] * (Global.WIDTH + 2) + new_centerR[:, 1] + 1 # idx in padded\n new_center_idxL = new_centerL[:,0]*(Global.WIDTH+2)+new_centerL[:,1]+1 #idx in padded\n condL=((new_centerL[:,0]<Global.HEIGHT)&(new_centerL[:,0]>=0)&(new_centerL[:,1]<Global.WIDTH)&(new_centerL[:,1]>=0))\n 
condR=((new_centerR[:,0]<Global.HEIGHT)&(new_centerR[:,0]>=0)&(new_centerR[:,1]<Global.WIDTH)&(new_centerR[:,1]>=0))\n cond=(condL&condR)\n\n new_center_idxL=torch.masked_select(new_center_idxL,cond)\n new_center_idxR=torch.masked_select(new_center_idxR,cond)\n idxL=torch.masked_select(idxL,cond)\n idxR=torch.masked_select(idxR,cond)\n\n new_center_idxL =torch.masked_select(new_center_idxL,((padded_matrixL[new_center_idxL.long()]==idxL)&(new_center_idxL>0)))\n new_center_idxR =torch.masked_select(new_center_idxR,((padded_matrixR[new_center_idxR.long()]==idxR)&(new_center_idxR>0)))\n\n prev_centersL=padded_matrixL.take(new_center_idxL.long())\n prev_centersR=padded_matrixR.take(new_center_idxR.long())\n\n\n\n\n if(new_center_idxL.size()==0):\n return 0\n\n neigh=Global.neig_padded\n dataL=torch.zeros((Global.HEIGHT+2)*(Global.WIDTH+2),2).to(Global.device).int()\n dataL[:,0]=torch.arange(0,(Global.HEIGHT+2)*(Global.WIDTH+2)).int()\n data2=dataL.clone()\n dataR=dataL.clone()\n\n dataL[new_center_idxL.long(),1] = prev_centersL.int()\n dataR[:, 1].zero_()\n dataR[new_center_idxR.long(), 1] = prev_centersR.int()\n loop=1\n distance_add=Global.ones.clone()\n valid_vL = new_center_idxL\n valid_vR = new_center_idxR\n while (loop):\n distance_add.add_(1)\n exp_clustersL = torch.take(padded_matrixL, valid_vL.long()).unsqueeze(1).repeat(1, 4).reshape(-1)\n idxL = torch.add(neigh.unsqueeze(0), valid_vL.unsqueeze(1))\n padded_matrixL.index_fill_(0, valid_vL.long(), 0)\n valid_indL = torch.masked_select(idxL.reshape(-1), padded_matrixL[idxL.reshape(-1).long()] == exp_clustersL)\n dataL[valid_indL.long(), 1] = padded_matrixL[valid_indL.long()].int()\n valid_indL = torch.masked_select(dataL[:, 0], dataL[:, 1] > 0)\n exp_clustersR = torch.take(padded_matrixR, valid_vR.long()).unsqueeze(1).repeat(1, 4).reshape(-1)\n idxR = torch.add(neigh.unsqueeze(0), valid_vR.unsqueeze(1))\n padded_matrixR.index_fill_(0, valid_vR.long(), 0)\n valid_indR = torch.masked_select(idxR.reshape(-1), padded_matrixR[idxR.reshape(-1).long()] == exp_clustersR)\n dataR[valid_indR.long(), 1] = padded_matrixR[valid_indR.long()].int()\n valid_indR = torch.masked_select(dataR[:, 0], dataR[:, 1] > 0)\n if ((valid_indL.shape[0] < 1)and(valid_indL.shape[0] < 1)):\n loop = 0\n else:\n if(valid_indL.shape[0] >0):\n distances[:, 0].index_add_(0, valid_indL.long(), distance_add[0:valid_indL.shape[0]])\n dataL[:, 1].zero_()\n valid_vL = valid_indL\n if(valid_indR.shape[0]>0):\n distances[:, 1].index_add_(0, valid_indR.long(), distance_add[0:valid_indR.shape[0]])\n dataR[:, 1].zero_()\n valid_vR = valid_indR\n\n distances[new_center_idxL.long(),0]=0\n\n\n idx=idx_rand.long()\n idx_args=torch.arange(0,idx.shape[0]).to(Global.device)\n split_idx=torch.masked_select(dataL[:,0],distances[:,1]>0).long()\n idx_final=torch.zeros(idx.shape[0],2).to(Global.device)\n idx_final[:,0]=idx_args\n idx_final[:,0]=idx\n padded_matrix = Global.Padding0(split_prev_r_ik).reshape(-1)\n distances[new_center_idxR.long(),1]=0\n new_idx=torch.where(distances[split_idx,0]<distances[split_idx,1],Global.zeros[:distances[split_idx,0].shape[0]],Global.ones[:distances[split_idx,0].shape[0]])\n\n new_cluster_idx=torch.masked_select(split_idx,new_idx.bool())\n dataR[:,1].zero_()\n idx=idx.long()\n data2[idx,1]=Global.N_index[0:idx.shape[0]].int()+2+torch.max(split_prev_r_ik.reshape(-1)).int()\n\n dataR[new_cluster_idx.long(),1]= ( Global.Padding0(split_prev_r_ik).reshape(-1).to(Global.device)[new_cluster_idx]).int()\n dataR[new_cluster_idx.long(),1]=(data2[( 
Global.Padding0(split_prev_r_ik).reshape(-1).to(Global.device)[new_cluster_idx]).long(),1])\n padded_matrix[new_cluster_idx.long()]=(dataR[new_cluster_idx.long(),1].long()).clone()\n clusters_LR[:,1].zero_()\n clusters_LR[idx,1]=data2[idx,1]\n\n prev_r_ik_padded = Global.Padding0(prev_r_ik).reshape(-1)\n\n prev_r_ik_padded[split_idx.long()]=( Global.Padding0(split_prev_r_ik.to(Global.device)).reshape(-1)[split_idx.long()]).clone() #NEW\n\n padded_matrix=padded_matrix.reshape(Global.HEIGHT+2,-1)\n\n prev_r_ik_padded=prev_r_ik_padded.reshape(Global.HEIGHT+2,-1) # NEW\n return padded_matrix[1:Global.HEIGHT+1,1:Global.WIDTH+1].reshape(-1),prev_r_ik_padded[1:Global.HEIGHT+1,1:Global.WIDTH+1].reshape(-1),clusters_LR\n\n\ndef Merge(prev_r_ik,split_prev_r_ik,clusters_LR):\n padding = torch.nn.ConstantPad2d((1, 1, 1, 1), 0).to(Global.device)\n padded_matrix = padding(prev_r_ik).reshape(-1).to(Global.device)\n padded_matrix_split = padding(split_prev_r_ik).reshape(-1).to(Global.device)\n\n left=torch.zeros(Global.K_C+1).int().to(Global.device)\n pair=torch.zeros(Global.K_C+1).int().to(Global.device)\n\n ind_left=torch.masked_select(Global.inside_padded,((padded_matrix[Global.inside_padded]==padded_matrix[Global.inside_padded-1])&(padded_matrix[Global.inside_padded-1]>0)))\n left[padded_matrix[ind_left]]=padded_matrix[ind_left-1].int()\n\n for i in range(0,Global.K_C+1):\n val=left[i]\n if(val>0):\n if((pair[i]==0)and(pair[val]==0)):\n if(val<i):\n pair[i]=val\n pair[val]=val\n else:\n pair[val]=i\n pair[i]=i\n\n pair_temp=torch.arange(0,Global.K_C+1).to(Global.device).int()\n pair_new=torch.where(pair>0,pair,pair_temp)\n\n padded_matrix=pair_new[padded_matrix]\n\n padded_matrix=padded_matrix.reshape(Global.HEIGHT+2,-1)\n padded_matrix_split=padded_matrix_split.reshape(Global.HEIGHT+2,-1)\n return padded_matrix[1:Global.HEIGHT + 1, 1:Global.WIDTH + 1].reshape(-1),padded_matrix_split[1:Global.HEIGHT + 1, 1:Global.WIDTH + 1].reshape(-1)\n" ]
[ [ "numpy.load" ] ]
sainiprathmesh/Bitcoin_Price_Alert_and_Prediction_System
[ "6ca7803185fa1bde570a4a6b67937242de3450a5" ]
[ "prediction/deep_learning_models.py" ]
[ "import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM, GRU\nfrom tensorflow.keras.models import Sequential\n\ndf = pd.read_csv('bitcoin_usd.csv')\n\ny = df['high']\ny = np.array(y)\nprint(y)\n\ndf = df.drop(labels=['_id', 'time'], axis=1)\n\nplt.plot(df)\nplt.show()\n\nsns.pairplot(df)\n\ndf.corr()\n\nsns.heatmap(df.corr())\n\ndf = df.drop(labels=['high'], axis=1)\n\nX = np.array(df)\n\nscaler = MinMaxScaler(feature_range=(0, 1))\nX1 = scaler.fit_transform(X.reshape(-1, 3))\n\nprint(X1.shape)\n\nn_features = 1\nX1 = X1.reshape((X1.shape[0], X1.shape[1], n_features))\n\nprint(X1.shape)\n\nscaler = MinMaxScaler(feature_range=(0, 1))\ny1 = scaler.fit_transform(y.reshape(-1, 1))\n\nX_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2)\n\nprint(X_train.shape), print(y_train.shape)\n\nprint(X_test.shape), print(y_test.shape)\n\n# define model1 GRU\nsg_time = time.time()\nmodel1 = Sequential()\nmodel1.add(GRU(100, activation='linear', return_sequences=True, input_shape=(3, 1)))\nmodel1.add(GRU(50, activation='linear'))\nmodel1.add(Dense(1))\nmodel1.compile(optimizer='adam', loss='mse')\n\n# fit model\nmodel1.fit(X_train, y_train, epochs=100, verbose=1)\neg_time = time.time()\n\n# Lets Do the prediction and check performance metrics\ntrain_predict = model1.predict(X_train)\ntest_predict = model1.predict(X_test)\nprint(\"Train Predict: \", train_predict)\nprint(\"Test Predict: \", test_predict)\nprint(\"Execution time: \", eg_time - sg_time)\n\n# Calculate MSE performance metrics\nfrom sklearn.metrics import mean_squared_error\n\nmse0 = mean_squared_error(y_train, train_predict)\nprint(\"mse_train_gru: \", mse0)\n\n# Calculate MSE performance metrics\nfrom sklearn.metrics import mean_squared_error\n\nmse1 = mean_squared_error(y_test, test_predict)\nprint(\"mse_test_gru: \", mse1)\n\n# Calculate r2 performance metrics\nfrom sklearn.metrics import r2_score\n\nr1 = r2_score(y_test, test_predict)\nprint(\"r2_gru: \", r1)\n\n# define model2 LSTM\nsl_time = time.time()\nmodel2 = Sequential()\nmodel2.add(LSTM(100, activation='linear', return_sequences=True, input_shape=(3, 1)))\nmodel2.add(LSTM(50, activation='linear'))\nmodel2.add(Dense(1))\nmodel2.compile(optimizer='adam', loss='mse')\n# fit model\nmodel2.fit(X_train, y_train, epochs=100, verbose=1)\nel_time = time.time()\n\n# Lets Do the prediction and check performance metrics\ntrain_predict = model2.predict(X_train)\ntest_predict = model2.predict(X_test)\nprint(\"Train Predict: \", train_predict)\nprint(\"Test Predict: \", test_predict)\nprint(\"Execution Time: \", el_time - sl_time)\n\n# Calculate MSE performance metrics\nfrom sklearn.metrics import mean_squared_error\n\nmse_train_lstm = mean_squared_error(y_train, train_predict)\nprint(\"mse_train_lstm: \", mse_train_lstm)\n\n# Calculate MSE performance metrics\nfrom sklearn.metrics import mean_squared_error\n\nmse_test_lstm = mean_squared_error(y_test, test_predict)\nprint(\"mse_test_lstm: \", mse_test_lstm)\n\n# Calculate r2 performance metrics\nfrom sklearn.metrics import r2_score\n\nr2 = r2_score(y_test, test_predict)\nprint(\"r2_lstm: \", r2)\n\n# plot\nplt.plot(scaler.inverse_transform(y_test))\nplt.plot(scaler.inverse_transform(test_predict))\nplt.xlim(200, 300)\nplt.show()\n\ndf_model = pd.DataFrame({'Model_Applied': ['GRU', 
'LSTM'], 'MSE': [mse1, mse_test_lstm], 'R2': [r1, r2],\n 'Execution Time': [eg_time - sg_time, el_time - sl_time]})\nprint(df_model)\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.plot", "tensorflow.keras.layers.GRU", "matplotlib.pyplot.xlim", "tensorflow.keras.layers.LSTM", "sklearn.preprocessing.MinMaxScaler", "numpy.array", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.show" ] ]
TomHacker/VTD
[ "3009fd53cec8a86493b5f1960e8879e5a0c7345c" ]
[ "Detection/CTPN_vertical/CP.py" ]
[ "import numpy as np\nimport os,xml.etree.ElementTree as ET\nimport cv2\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom concurrent.futures import ThreadPoolExecutor\nimport tensorflow as tf\nfrom PIL import Image,ImageDraw\nfrom math import *\nanchor_scale = 16\n#\nIOU_NEGATIVE = 0.3\nIOU_POSITIVE = 0.7\nIOU_SELECT = 0.7\n\nRPN_POSITIVE_NUM = 150\nRPN_TOTAL_NUM = 300\n\n# bgr can find from here https://github.com/fchollet/deep-learning-models/blob/master/imagenet_utils.py\nIMAGE_MEAN = [123.68, 116.779, 103.939]\n\nDEBUG = True\ndef rotate(img_path,degree=90):\n\n img=cv2.imread(img_path)\n height, width = img.shape[:2]\n\n # 旋转后的尺寸\n heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree)))) # 这个公式参考之前内容\n widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))\n\n matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)\n\n matRotation[0, 2] += (widthNew - width) / 2 # 因为旋转之后,坐标系原点是新图像的左上角,所以需要根据原图做转化\n matRotation[1, 2] += (heightNew - height) / 2\n\n imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))\n\n return Image.fromarray(imgRotation)\n\ndef drawRect(gtboxes,img):\n draw=ImageDraw.Draw(img)\n for i in gtboxes:\n draw.rectangle((i[0],i[1],i[2],i[3]),outline='red')\n img.show()\n\ndef readxml(path):\n gtboxes=[]\n tree=ET.parse(path)\n root=tree.getroot()\n image_name = tree.find('filename').text\n for obj in root.iter('object'):\n try:\n bnd=obj.find('bndbox')\n xmin=int(bnd.find('xmin').text)\n ymin=int(bnd.find('ymin').text)\n xmax=int(bnd.find('xmax').text)\n ymax=int(bnd.find('ymax').text)\n gtboxes.append((xmin,ymin,xmax,ymax))\n except Exception as e:\n raise Exception(e+\" when {} loading xml file, there are errors\".format(path))\n return np.array(gtboxes),image_name\n\n\ndef gen_anchor(featuresize, scale):\n \"\"\"\n gen base anchor from feature map [HXW][10][4]\n reshape [HXW][10][4] to [HXWX10][4]\n 生成的锚框是相对于原图的,即原图中每16像素就有10个锚框\n \"\"\"\n\n heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]\n widths = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16]\n\n # gen k=9 anchor size (h,w)\n heights = np.array(heights).reshape(len(heights), 1)\n widths = np.array(widths).reshape(len(widths), 1)\n\n # 锚框大小为16像素\n base_anchor = np.array([0, 0, 15, 15])\n # center x,y\n xt = (base_anchor[0] + base_anchor[2]) * 0.5\n yt = (base_anchor[1] + base_anchor[3]) * 0.5\n\n # x1 y1 x2 y2\n x1 = xt - widths * 0.5\n y1 = yt - heights * 0.5\n x2 = xt + widths * 0.5\n y2 = yt + heights * 0.5\n\n # 一组十个锚框\n base_anchor = np.hstack((x1, y1, x2, y2))\n\n h, w = featuresize\n shift_x = np.arange(0, w) * scale\n shift_y = np.arange(0, h) * scale\n # apply shift\n anchor = []\n for i in shift_y:\n for j in shift_x:\n anchor.append(base_anchor + [j, i, j, i])\n return np.array(anchor).reshape((-1, 4))\n\n\ndef cal_iou(box1, box1_area, boxes2, boxes2_area):\n \"\"\"\n box1 [x1,y1,x2,y2]\n boxes2 [Msample,x1,y1,x2,y2]\n \"\"\"\n x1 = np.maximum(box1[0], boxes2[:, 0])\n x2 = np.minimum(box1[2], boxes2[:, 2])\n y1 = np.maximum(box1[1], boxes2[:, 1])\n y2 = np.minimum(box1[3], boxes2[:, 3])\n\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n iou = intersection / (box1_area + boxes2_area[:] - intersection[:])\n return iou\n\n\ndef cal_overlaps(boxes1, boxes2):\n \"\"\"\n boxes1 [Nsample,x1,y1,x2,y2] anchor\n boxes2 [Msample,x1,y1,x2,y2] grouth-box\n\n \"\"\"\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = 
(boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps\n\n\ndef bbox_transfrom(anchors, gtboxes):\n \"\"\"\n anchors: (Nsample, 4)\n gtboxes: (Nsample, 4)\n compute relative predicted vertical coordinates Vc ,Vh\n with respect to the bounding box location of an anchor\n \"\"\"\n Cy = (gtboxes[:, 1] + gtboxes[:, 3]) * 0.5 # (Nsample, )\n Cya = (anchors[:, 1] + anchors[:, 3]) * 0.5 # (Nsample, )\n h = gtboxes[:, 3] - gtboxes[:, 1] + 1.0 # (Nsample, )\n ha = anchors[:, 3] - anchors[:, 1] + 1.0 # (Nsample, )\n\n Vc = (Cy - Cya) / ha # (Nsample, )\n Vh = np.log(h / ha) # (Nsample, )\n\n ret = np.vstack((Vc, Vh))\n\n return ret.transpose() # (Nsample, 2)\n\n\ndef bbox_transfor_inv(anchor, regr):\n \"\"\"\n anchor: (NSample, 4)\n regr: (NSample, 2)\n 根据锚框和偏移量反向得到GTBox\n \"\"\"\n\n Cya = (anchor[:, 1] + anchor[:, 3]) * 0.5 # 锚框y中心点\n ha = anchor[:, 3] - anchor[:, 1] + 1\n\n Vcx = regr[..., 0] # y中心点偏移\n Vhx = regr[..., 1] # 高度偏移\n\n Cyx = Vcx * ha + Cya # GTBox y中心点\n hx = np.exp(Vhx) * ha # GTBox 高\n xt = (anchor[:, 0] + anchor[:, 2]) * 0.5 # 锚框x中心点\n\n x1 = xt - 16 * 0.5\n y1 = Cyx - hx * 0.5\n x2 = xt + 16 * 0.5\n y2 = Cyx + hx * 0.5\n bbox = np.vstack((x1, y1, x2, y2)).transpose()\n\n return bbox\n\n\ndef clip_box(bbox, im_shape):\n # x1 >= 0\n bbox[:, 0] = np.maximum(np.minimum(bbox[:, 0], im_shape[1] - 1), 0)\n # y1 >= 0\n bbox[:, 1] = np.maximum(np.minimum(bbox[:, 1], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n bbox[:, 2] = np.maximum(np.minimum(bbox[:, 2], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n bbox[:, 3] = np.maximum(np.minimum(bbox[:, 3], im_shape[0] - 1), 0)\n\n return bbox\n\n\ndef filter_bbox(bbox, minsize):\n ws = bbox[:, 2] - bbox[:, 0] + 1\n hs = bbox[:, 3] - bbox[:, 1] + 1\n keep = np.where((ws >= minsize) & (hs >= minsize))[0]\n return keep\n\n\ndef cal_rpn(imgsize, featuresize, scale, gtboxes):\n \"\"\"\n gtboxes: (Msample, 4)\n \"\"\"\n imgh, imgw = imgsize\n\n # gen base anchor\n base_anchor = gen_anchor(featuresize, scale) # (Nsample, 4)\n\n # calculate iou\n overlaps = cal_overlaps(base_anchor, gtboxes) # (Nsample, Msample)\n\n # init labels -1 don't care 0 is negative 1 is positive\n labels = np.empty(base_anchor.shape[0])\n labels.fill(-1) # (Nsample,)\n\n # for each GT box corresponds to an anchor which has highest IOU\n gt_argmax_overlaps = overlaps.argmax(axis=0) # (Msample, )\n\n # the anchor with the highest IOU overlap with a GT box\n anchor_argmax_overlaps = overlaps.argmax(axis=1) # (Nsample, )\n anchor_max_overlaps = overlaps[range(overlaps.shape[0]), anchor_argmax_overlaps] # (Nsample, )\n print('anchor max overlaps')\n for i in anchor_max_overlaps:\n if i>IOU_POSITIVE:\n print(i)\n\n # IOU > IOU_POSITIVE\n labels[anchor_max_overlaps > IOU_POSITIVE] = 1\n # IOU <IOU_NEGATIVE\n labels[anchor_max_overlaps < IOU_NEGATIVE] = 0\n # ensure that every GT box has at least one positive RPN region\n labels[gt_argmax_overlaps] = 1\n # print('内部的label',labels)\n # for i in labels:\n # print(i)\n\n # only keep anchors inside the image\n outside_anchor = np.where(\n (base_anchor[:, 0] < 0) |\n (base_anchor[:, 1] < 0) |\n (base_anchor[:, 2] >= imgw) |\n (base_anchor[:, 3] >= imgh)\n )[0]\n labels[outside_anchor] = -1\n\n # 剔除掉多余的正负样例\n # subsample positive labels ,if greater than 
RPN_POSITIVE_NUM(default 128)\n fg_index = np.where(labels == 1)[0]\n if (len(fg_index) > RPN_POSITIVE_NUM):\n labels[np.random.choice(fg_index, len(fg_index) - RPN_POSITIVE_NUM, replace=False)] = -1\n\n # subsample negative labels\n bg_index = np.where(labels == 0)[0]\n num_bg = RPN_TOTAL_NUM - np.sum(labels == 1)\n if (len(bg_index) > num_bg):\n # print('bgindex:',len(bg_index),'num_bg',num_bg)\n labels[np.random.choice(bg_index, len(bg_index) - num_bg, replace=False)] = -1\n\n # calculate bbox targets\n # debug here\n bbox_targets = bbox_transfrom(base_anchor, gtboxes[anchor_argmax_overlaps, :])\n # bbox_targets=[]\n\n return [labels, bbox_targets], base_anchor\n\n\ndef get_session(gpu_fraction=0.6):\n '''''Assume that you have 6GB of GPU memory and want to allocate ~2GB'''\n\n num_threads = os.environ.get('OMP_NUM_THREADS')\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n\n if num_threads:\n return tf.Session(config=tf.ConfigProto(\n gpu_options=gpu_options, intra_op_parallelism_threads=num_threads, allow_soft_placement=True))\n else:\n return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))\n\n\nclass random_uniform_num():\n \"\"\"\n uniform random\n \"\"\"\n\n def __init__(self, total, start=0):\n self.total = total\n self.range = [i for i in range(total)]\n np.random.shuffle(self.range)\n self.index = start\n\n def get(self, batch_size):\n ret = []\n if self.index + batch_size > self.total:\n piece1 = self.range[self.index:]\n np.random.shuffle(self.range)\n self.index = (self.index + batch_size) - self.total\n piece2 = self.range[0:self.index]\n ret.extend(piece1)\n ret.extend(piece2)\n else:\n ret = self.range[self.index:self.index + batch_size]\n self.index = self.index + batch_size\n return ret\n\n\ndef nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1] # Sort from high to low\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\n\ndef gen_sample(xmlpath, imgpath, batchsize=1):\n \"\"\"\n 由于图像大小不定,批处理大小只能为1\n \"\"\"\n\n # list xml\n xmlfiles = glob(xmlpath + '/*.xml')\n rd = random_uniform_num(len(xmlfiles))\n xmlfiles = np.array(xmlfiles)\n\n while True:\n shuf = xmlfiles[rd.get(1)]\n gtbox, imgfile = readxml(shuf[0])\n img = cv2.imread(imgpath + \"\\\\\" + imgfile)\n h, w, c = img.shape\n\n # clip image\n if np.random.randint(0, 100) > 50:\n img = img[:, ::-1, :]\n newx1 = w - gtbox[:, 2] - 1\n newx2 = w - gtbox[:, 0] - 1\n gtbox[:, 0] = newx1\n gtbox[:, 2] = newx2\n\n [cls, regr], _ = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n # zero-center by mean pixel\n m_img = img - IMAGE_MEAN\n m_img = np.expand_dims(m_img, axis=0)\n\n regr = np.hstack([cls.reshape(cls.shape[0], 1), regr])\n\n #\n cls = np.expand_dims(cls, axis=0)\n cls = np.expand_dims(cls, axis=1)\n # regr = np.expand_dims(regr,axis=1)\n regr = np.expand_dims(regr, axis=0)\n\n yield m_img, {'rpn_class_reshape': cls, 'rpn_regress_reshape': regr}\n\n\ndef rpn_test():\n xmlpath = 
'G:\\data\\VOCdevkit\\VOC2007\\Annotations\\img_4375.xml'\n imgpath = 'G:\\data\\VOCdevkit\\VOC2007\\JPEGImages\\img_4375.jpg'\n gtbox, _ = readxml(xmlpath)\n img = cv2.imread(imgpath)\n h, w, c = img.shape\n [cls, regr], base_anchor = cal_rpn((h, w), (int(h / 16), int(w / 16)), 16, gtbox)\n print(cls.shape)\n print(regr.shape)\n\n regr = np.expand_dims(regr, axis=0)\n inv_anchor = bbox_transfor_inv(base_anchor, regr)\n anchors = inv_anchor[cls == 1]\n anchors = anchors.astype(int)\n for i in anchors:\n cv2.rectangle(img, (i[0], i[1]), (i[2], i[3]), (255, 0, 0), 3)\n plt.imshow(img)\n\n\ndef threshold(coords, min_, max_):\n return np.maximum(np.minimum(coords, max_), min_)\n\ndef clip_boxes(boxes, im_shape):\n \"\"\"\n Clip boxes to image boundaries.\n \"\"\"\n boxes[:, 0::2]=threshold(boxes[:, 0::2], 0, im_shape[1]-1)\n boxes[:, 1::2]=threshold(boxes[:, 1::2], 0, im_shape[0]-1)\n return boxes\n\n\nclass Graph:\n def __init__(self, graph):\n self.graph=graph\n\n def sub_graphs_connected(self):\n sub_graphs=[]\n for index in range(self.graph.shape[0]):\n if not self.graph[:, index].any() and self.graph[index, :].any():\n v=index\n sub_graphs.append([v])\n while self.graph[v, :].any():\n v=np.where(self.graph[v, :])[0][0]\n sub_graphs[-1].append(v)\n return sub_graphs\nclass TextLineCfg:\n SCALE=600\n MAX_SCALE=1200\n TEXT_PROPOSALS_WIDTH=16\n MIN_NUM_PROPOSALS = 2\n MIN_RATIO=0.5\n LINE_MIN_SCORE=0.9\n MAX_HORIZONTAL_GAP=60\n TEXT_PROPOSALS_MIN_SCORE=0.7\n TEXT_PROPOSALS_NMS_THRESH=0.3\n MIN_V_OVERLAPS=0.6\n MIN_SIZE_SIM=0.6\n\nclass TextProposalGraphBuilder:\n \"\"\"\n Build Text proposals into a graph.\n \"\"\"\n def get_successions(self, index):\n box=self.text_proposals[index]\n results=[]\n for left in range(int(box[0])+1, min(int(box[0])+TextLineCfg.MAX_HORIZONTAL_GAP+1, self.im_size[1])):\n adj_box_indices=self.boxes_table[left]\n for adj_box_index in adj_box_indices:\n if self.meet_v_iou(adj_box_index, index):\n results.append(adj_box_index)\n if len(results)!=0:\n return results\n return results\n\n def get_precursors(self, index):\n box=self.text_proposals[index]\n results=[]\n for left in range(int(box[0])-1, max(int(box[0]-TextLineCfg.MAX_HORIZONTAL_GAP), 0)-1, -1):\n adj_box_indices=self.boxes_table[left]\n for adj_box_index in adj_box_indices:\n if self.meet_v_iou(adj_box_index, index):\n results.append(adj_box_index)\n if len(results)!=0:\n return results\n return results\n\n def is_succession_node(self, index, succession_index):\n precursors=self.get_precursors(succession_index)\n if self.scores[index]>=np.max(self.scores[precursors]):\n return True\n return False\n\n def meet_v_iou(self, index1, index2):\n def overlaps_v(index1, index2):\n h1=self.heights[index1]\n h2=self.heights[index2]\n y0=max(self.text_proposals[index2][1], self.text_proposals[index1][1])\n y1=min(self.text_proposals[index2][3], self.text_proposals[index1][3])\n return max(0, y1-y0+1)/min(h1, h2)\n\n def size_similarity(index1, index2):\n h1=self.heights[index1]\n h2=self.heights[index2]\n return min(h1, h2)/max(h1, h2)\n\n return overlaps_v(index1, index2)>=TextLineCfg.MIN_V_OVERLAPS and \\\n size_similarity(index1, index2)>=TextLineCfg.MIN_SIZE_SIM\n\n def build_graph(self, text_proposals, scores, im_size):\n self.text_proposals=text_proposals\n self.scores=scores\n self.im_size=im_size\n self.heights=text_proposals[:, 3]-text_proposals[:, 1]+1\n\n boxes_table=[[] for _ in range(self.im_size[1])]\n for index, box in enumerate(text_proposals):\n boxes_table[int(box[0])].append(index)\n 
self.boxes_table=boxes_table\n\n graph=np.zeros((text_proposals.shape[0], text_proposals.shape[0]), np.bool)\n\n for index, box in enumerate(text_proposals):\n successions=self.get_successions(index)\n if len(successions)==0:\n continue\n succession_index=successions[np.argmax(scores[successions])]\n if self.is_succession_node(index, succession_index):\n # NOTE: a box can have multiple successions(precursors) if multiple successions(precursors)\n # have equal scores.\n graph[index, succession_index]=True\n return Graph(graph)\nclass TextProposalConnector:\n def __init__(self):\n self.graph_builder=TextProposalGraphBuilder()\n\n def group_text_proposals(self, text_proposals, scores, im_size):\n graph=self.graph_builder.build_graph(text_proposals, scores, im_size)\n return graph.sub_graphs_connected()\n\n def fit_y(self, X, Y, x1, x2):\n len(X)!=0\n # if X only include one point, the function will get line y=Y[0]\n if np.sum(X==X[0])==len(X):\n return Y[0], Y[0]\n p=np.poly1d(np.polyfit(X, Y, 1))\n return p(x1), p(x2)\n\n def get_text_lines(self, text_proposals, scores, im_size):\n # tp=text proposal\n tp_groups=self.group_text_proposals(text_proposals, scores, im_size)\n text_lines=np.zeros((len(tp_groups), 5), np.float32)\n\n for index, tp_indices in enumerate(tp_groups):\n text_line_boxes=text_proposals[list(tp_indices)]\n\n x0=np.min(text_line_boxes[:, 0])\n x1=np.max(text_line_boxes[:, 2])\n\n offset=(text_line_boxes[0, 2]-text_line_boxes[0, 0])*0.5\n\n lt_y, rt_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0+offset, x1-offset)\n lb_y, rb_y=self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0+offset, x1-offset)\n\n # the score of a text line is the average score of the scores\n # of all text proposals contained in the text line\n score=scores[list(tp_indices)].sum()/float(len(tp_indices))\n\n text_lines[index, 0]=x0\n text_lines[index, 1]=min(lt_y, rt_y)\n text_lines[index, 2]=x1\n text_lines[index, 3]=max(lb_y, rb_y)\n text_lines[index, 4]=score\n\n text_lines=clip_boxes(text_lines, im_size)\n\n text_recs = np.zeros((len(text_lines), 9), np.float)\n index = 0\n for line in text_lines:\n xmin,ymin,xmax,ymax=line[0],line[1],line[2],line[3]\n text_recs[index, 0] = xmin\n text_recs[index, 1] = ymin\n text_recs[index, 2] = xmax\n text_recs[index, 3] = ymin\n text_recs[index, 4] = xmin\n text_recs[index, 5] = ymax\n text_recs[index, 6] = xmax\n text_recs[index, 7] = ymax\n text_recs[index, 8] = line[4]\n index = index + 1\n\n return text_recs\n\n\nclass TextProposalConnectorOriented:\n \"\"\"\n Connect text proposals into text lines\n \"\"\"\n\n def __init__(self):\n self.graph_builder = TextProposalGraphBuilder()\n\n def group_text_proposals(self, text_proposals, scores, im_size):\n graph = self.graph_builder.build_graph(text_proposals, scores, im_size)\n return graph.sub_graphs_connected()\n\n def fit_y(self, X, Y, x1, x2):\n len(X) != 0\n # if X only include one point, the function will get line y=Y[0]\n if np.sum(X == X[0]) == len(X):\n return Y[0], Y[0]\n p = np.poly1d(np.polyfit(X, Y, 1))\n return p(x1), p(x2)\n\n def get_text_lines(self, text_proposals, scores, im_size):\n \"\"\"\n text_proposals:boxes\n\n \"\"\"\n # tp=text proposal\n tp_groups = self.group_text_proposals(text_proposals, scores, im_size) # 首先还是建图,获取到文本行由哪几个小框构成\n\n text_lines = np.zeros((len(tp_groups), 8), np.float32)\n\n for index, tp_indices in enumerate(tp_groups):\n text_line_boxes = text_proposals[list(tp_indices)] # 每个文本行的全部小框\n X = (text_line_boxes[:, 0] + text_line_boxes[:, 2]) / 2 # 
求每一个小框的中心x,y坐标\n Y = (text_line_boxes[:, 1] + text_line_boxes[:, 3]) / 2\n\n z1 = np.polyfit(X, Y, 1) # 多项式拟合,根据之前求的中心店拟合一条直线(最小二乘)\n\n x0 = np.min(text_line_boxes[:, 0]) # 文本行x坐标最小值\n x1 = np.max(text_line_boxes[:, 2]) # 文本行x坐标最大值\n\n offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5 # 小框宽度的一半\n\n # 以全部小框的左上角这个点去拟合一条直线,然后计算一下文本行x坐标的极左极右对应的y坐标\n lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)\n # 以全部小框的左下角这个点去拟合一条直线,然后计算一下文本行x坐标的极左极右对应的y坐标\n lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)\n\n score = scores[list(tp_indices)].sum() / float(len(tp_indices)) # 求全部小框得分的均值作为文本行的均值\n\n text_lines[index, 0] = x0\n text_lines[index, 1] = min(lt_y, rt_y) # 文本行上端 线段 的y坐标的小值\n text_lines[index, 2] = x1\n text_lines[index, 3] = max(lb_y, rb_y) # 文本行下端 线段 的y坐标的大值\n text_lines[index, 4] = score # 文本行得分\n text_lines[index, 5] = z1[0] # 根据中心点拟合的直线的k,b\n text_lines[index, 6] = z1[1]\n height = np.mean((text_line_boxes[:, 3] - text_line_boxes[:, 1])) # 小框平均高度\n text_lines[index, 7] = height + 2.5\n\n text_recs = np.zeros((len(text_lines), 9), np.float)\n index = 0\n for line in text_lines:\n b1 = line[6] - line[7] / 2 # 根据高度和文本行中心线,求取文本行上下两条线的b值\n b2 = line[6] + line[7] / 2\n x1 = line[0]\n y1 = line[5] * line[0] + b1 # 左上\n x2 = line[2]\n y2 = line[5] * line[2] + b1 # 右上\n x3 = line[0]\n y3 = line[5] * line[0] + b2 # 左下\n x4 = line[2]\n y4 = line[5] * line[2] + b2 # 右下\n disX = x2 - x1\n disY = y2 - y1\n width = np.sqrt(disX * disX + disY * disY) # 文本行宽度\n\n fTmp0 = y3 - y1 # 文本行高度\n fTmp1 = fTmp0 * disY / width\n x = np.fabs(fTmp1 * disX / width) # 做补偿\n y = np.fabs(fTmp1 * disY / width)\n if line[5] < 0:\n x1 -= x\n y1 += y\n x4 += x\n y4 -= y\n else:\n x2 += x\n y2 += y\n x3 -= x\n y3 -= y\n text_recs[index, 0] = x1\n text_recs[index, 1] = y1\n text_recs[index, 2] = x2\n text_recs[index, 3] = y2\n text_recs[index, 4] = x3\n text_recs[index, 5] = y3\n text_recs[index, 6] = x4\n text_recs[index, 7] = y4\n text_recs[index, 8] = line[4]\n index = index + 1\n\n return text_recs\n\nif __name__=='__main__':\n xmlpath = 'D:\\py_projects\\data_new\\data_new\\data\\\\annotation\\\\img_calligraphy_00003_bg.xml'\n imgpath = 'D:\\py_projects\\data_new\\data_new\\data\\\\train_img\\\\img_calligraphy_00003_bg.jpg'\n gtboxes, _ = readxml(xmlpath)\n img = rotate(imgpath,90)\n print(gtboxes)\n ow,oh=img.size\n newbox = []\n for i in gtboxes:\n newbox.append([i[1], ow - i[2], i[3], ow - i[0]])\n gtboxes = np.array(newbox)\n image_shape = (512,256)\n x_scale = image_shape[0] / ow\n y_scale = image_shape[1] / oh\n newbox = []\n for i in range(len(gtboxes)):\n newbox.append(\n [gtboxes[i][0] * x_scale, gtboxes[i][1] * y_scale, gtboxes[i][2] * x_scale,\n gtboxes[i][3] * y_scale]\n )\n img = img.resize(image_shape, Image.ANTIALIAS)\n gtboxes = np.array(newbox)\n print(gtboxes)\n [cls,regr],base_anchor=cal_rpn((256,512),(256//16,512//16),16,gtboxes)\n print(cls)\n regr=np.expand_dims(regr,axis=0)\n anchor=bbox_transfor_inv(base_anchor,regr)\n anchor=anchor[cls==1]\n anchor=anchor.astype(int)\n print(anchor)\n drawRect(anchor,img)\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.expand_dims", "numpy.minimum", "numpy.polyfit", "numpy.sqrt", "numpy.vstack", "numpy.fabs", "numpy.max", "tensorflow.GPUOptions", "numpy.mean", "numpy.exp", "numpy.where", "numpy.random.randint", "numpy.hstack", "numpy.arange", "tensorflow.ConfigProto", "numpy.argmax", "numpy.zeros", "numpy.log", "numpy.min", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.random.shuffle", "numpy.empty" ] ]
aj-ames/NSAC-NULLPointers
[ "36945b0f9678509213b9b17d93c36f2a3b92285c" ]
[ "firedetectionserver/run.py" ]
[ "import time\nimport numpy as np\nimport cv2\nimport sys\nimport requests\nfrom videocaptureasync import VideoCaptureAsync\nimport traceback\nimport ast\nimport random\nimport sys\n\nclass Theia:\n ''' Class to hold member methods and variables\n for performing Inference. '''\n\n def __init__(self):\n ''' Method called when class object is created. '''\n\n self.path = 'src/classes.txt'\n self.classes = self.read_class_names(self.path)\n self.num_classes = len(self.classes)\n self.colors = self.colorGenerator(self.num_classes)\n self.inference_endpoint = 'http://127.0.0.1:8000/detect_drone'\n self.source_path = sys.argv[1]\n # self.source = VideoCaptureAsync(0 if self.source_path == 'webcam' else self.source_path,\n # width=1920, height=1080)\n # self.source.start()\n self.source = cv2.VideoCapture(self.source_path)\n\n if len(sys.argv) > 2 and sys.argv[2] == '--record':\n ret, frame = self.source.read()\n if ret:\n height, width = frame.shape[0], frame.shape[1]\n self.fourcc = cv2.VideoWriter_fourcc(*'XVID')\n self.out = cv2.VideoWriter('output.avi',self.fourcc, 30.0, (width, height))\n\n cv2.namedWindow('Result', cv2.WINDOW_NORMAL)\n\n def colorGenerator(self, num):\n ''' Method to generate random colors. '''\n\n colors = dict()\n for i in range(0, num):\n temp = list()\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n temp.append(b)\n temp.append(g)\n temp.append(r)\n colors.update({i+1:temp})\n\n return colors\n\n def read_class_names(self, path):\n ''' Method to read the class names. '''\n try:\n names = dict()\n with open(path, 'r') as data:\n for ID, name in enumerate(data):\n names[ID + 1] = name.strip('\\n')\n data.close()\n return names\n\n except:\n print('Failed to read class names')\n traceback.print_exc()\n sys.exit(0)\n\n def plotResults(self, frame, result):\n ''' Method to plot results. '''\n\n class_ids = result[0][0]\n confidence = result[0][1]\n boundaries = result[0][2]\n\n for counter in range(len(class_ids)):\n class_id = self.classes[class_ids[counter]]\n if(class_id == 'fire' or class_id == 'person'):\n conf = str(confidence[counter])\n x1 = int(boundaries[counter][0])\n y1 = int(boundaries[counter][1])\n x2 = int(boundaries[counter][2])\n y2 = int(boundaries[counter][3])\n color = self.colors[class_ids[counter]]\n cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)\n cv2.rectangle(frame, (x1, y1), (x1 + (len(class_id) + len(conf)) * 20,\n y1 - 25) , color, -1, cv2.LINE_AA)\n cv2.putText(frame, class_id + ':' + conf, (x1, y1),\n cv2.FONT_HERSHEY_COMPLEX, 0.85, (0, 0, 0), 1, cv2.LINE_AA)\n return frame\n\n def inference(self):\n ''' Method to perform inference. 
'''\n\n while True:\n ret, frame = self.source.read()\n if ret:\n batch = np.expand_dims(frame, axis=0)\n batch_len, height, width, channels = batch.shape[0], batch.shape[1], batch.shape[2], batch.shape[3]\n\n data = dict()\n data.update({'Batch':batch.tostring()})\n data.update({'BatchLength':batch_len})\n data.update({'ImageHeight':height})\n data.update({'ImageWidth':width})\n data.update({'Channels':channels})\n\n response = requests.post(self.inference_endpoint, data=str(data))\n result = ast.literal_eval(ast.literal_eval(response.content.decode('utf-8'))['Response'])\n frame = self.plotResults(frame, result)\n if len(sys.argv) > 2 and sys.argv[2] == '--record':\n self.out.write(frame)\n cv2.imshow('Result', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if len(sys.argv) > 2 and sys.argv[2] == '--record':\n self.out.release()\n # self.source.stop()\n self.source.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n print('Python: ', sys.version)\n print('OpenCV: ', cv2.__version__)\n print('Numpy: ', np.__version__)\n # print('CPU Core Count: ', cpu_count())\n # print('Available Multiprocessing Modes: ' ,get_all_start_methods())\n # print('Selected Multiprocessing Mode: ' ,get_start_method())\n print('-------------------------------------------------------')\n\n theia = Theia()\n theia.inference()" ]
[ [ "numpy.expand_dims" ] ]
eyecan-dev/HRNet-for-Fashion-Landmark-Estimation.PyTorch
[ "63dcb75836ebb8896307c9e5a62be8a475de8323" ]
[ "lib/dataset/deepfashion2.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nimport json_tricks as json\nimport numpy as np\n\nfrom dataset.JointsDataset import JointsDataset\nfrom nms.nms import oks_nms\nfrom nms.nms import soft_oks_nms\n\nimport time\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DeepFashion2Dataset(JointsDataset):\n '''\n \"gt_class_keypoints_dict\":{\n 1: (0, 25), \n 2: (25, 58), \n 3: (58, 89), \n 4: (89, 128), \n 5: (128, 143), \n 6: (143, 158), \n 7: (158, 168), \n 8: (168, 182), \n 9: (182, 190), \n 10: (190, 219), \n 11: (219, 256), \n 12: (256, 275), \n 13: (275, 294)\n },\n '''\n def __init__(self, cfg, root, image_set, is_train, transform=None):\n super().__init__(cfg, root, image_set, is_train, transform)\n self.nms_thre = cfg.TEST.NMS_THRE\n self.image_thre = cfg.TEST.IMAGE_THRE\n self.soft_nms = cfg.TEST.SOFT_NMS\n self.oks_thre = cfg.TEST.OKS_THRE\n self.in_vis_thre = cfg.TEST.IN_VIS_THRE\n self.bbox_file = cfg.TEST.DEEPFASHION2_BBOX_FILE\n # self.bbox_file = cfg.TEST.COCO_BBOX_FILE\n self.use_gt_bbox = cfg.TEST.USE_GT_BBOX\n self.image_width = cfg.MODEL.IMAGE_SIZE[0]\n self.image_height = cfg.MODEL.IMAGE_SIZE[1]\n self.mini_dataset = cfg.DATASET.MINI_DATASET\n self.select_cat = cfg.DATASET.SELECT_CAT\n self.aspect_ratio = self.image_width * 1.0 / self.image_height\n self.pixel_std = 200\n\n self.coco = COCO(self._get_ann_file_keypoint())\n\n # deal with class names\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n # load image file names\n self.image_set_index = self._load_image_set_index()\n self.num_images = len(self.image_set_index)\n logger.info('=> num_images: {}'.format(self.num_images))\n\n self.num_joints = 294\n self.gt_class_keypoints_dict = {1: (0, 25), 2: (25, 58), 3: (58, 89),\n 4: (89, 128), 5: (128, 143), 6: (143, 158), 7: (158, 168),\n 8: (168, 182), 9: (182, 190), 10: (190, 219),\n 11: (219, 256), 12: (256, 275), 13: (275, 294)}\n\n _flip_pairs = [\n [[2,6],[3,5],[7,25],[8,24],[9,23],[10,22],[11,21],[12,20],[13,19],[14,18],[15,17]],\n [[2,6],[3,5],[7,33],[8,32],[9,31],[10,30],[11,29],[12,28],[13,27],[14,26],[15,25],[16,24],[17,23],[18,22],[19,21]],\n [[2,26],[3,5],[4,6],[7,25],[8,24],[9,23],[10,22],[11,21],[12,20],[13,19],[14,18],[15,17],[16,29],[27,30],[28,31]],\n [[2,6],[3,5],[4,34],[7,33],[8,32],[9,31],[10,30],[11,29],[12,28],[13,27],[14,26],[15,25],[16,24],[17,23],[18,22],[19,21],[20,37],[35,38],[36,39]],\n [[2,6],[3,5],[7,15],[8,14],[9,13],[10,12]],\n [[2,6],[3,5],[7,15],[8,14],[9,13],[10,12]],\n [[1,3],[4,10],[5,9],[6,8]],\n [[1,3],[4,14],[5,13],[6,12],[7,11],[8,10]],\n [[1,3],[4,8],[5,7]],\n 
[[2,6],[3,5],[7,29],[8,28],[9,27],[10,26],[11,25],[12,24],[13,23],[14,22],[15,21],[16,20],[17,19]],\n [[2,6],[3,5],[7,37],[8,36],[9,35],[10,34],[11,33],[12,32],[13,31],[14,30],[15,29],[16,28],[17,27],[18,26],[19,25],[20,24],[21,23]],\n [[2,6],[3,5],[7,19],[8,18],[9,17],[10,16],[11,15],[12,14]],\n [[2,6],[3,5],[7,19],[8,18],[9,17],[10,16],[11,15],[12,14]]\n ]\n self.flip_pairs = []\n for idx, _cat_pairs in enumerate(_flip_pairs):\n start_idx = self.gt_class_keypoints_dict[idx+1][0]\n cat_pairs = []\n for pair in _cat_pairs:\n x0 = pair[0] + start_idx - 1\n x1 = pair[1] + start_idx - 1\n cat_pairs.append([x0,x1])\n self.flip_pairs.append(cat_pairs)\n \n self.parent_ids = None\n\n self.joints_weight = np.ones((self.num_joints, 1), dtype=np.float32\n ).reshape((self.num_joints, 1))\n\n print('Generating samples...')\n tic = time.time()\n self.cls_stat = np.array([0 for i in range(self.num_classes)])\n self.sample_list_of_cls = []\n self.db = self._get_db()\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n\n if is_train and cfg.DATASET.SELECT_DATA:\n self.db = self.select_data(self.db)\n\n logger.info('=> load {} samples'.format(len(self.db)))\n\n def _get_ann_file_keypoint(self):\n if 'train' in self.image_set:\n directory = self.image_set\n if self.mini_dataset:\n return os.path.join(self.root, directory, 'train-coco_style-32.json')\n else:\n return os.path.join(self.root, directory, 'train-coco_style.json')\n elif 'validation' in self.image_set:\n directory = self.image_set\n if self.mini_dataset:\n return os.path.join(self.root, directory, 'val-coco_style-64.json')\n else:\n return os.path.join(self.root, directory, 'val-coco_style.json')\n elif 'test' in self.image_set:\n directory = 'json_for_test'\n return os.path.join(self.root, directory, 'keypoints_test_information.json')\n else:\n raise NotImplementedError\n\n def _load_image_set_index(self):\n \"\"\" image id: int \"\"\"\n image_ids = self.coco.getImgIds()\n return image_ids\n\n def _get_db(self):\n if self.is_train or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n # gt_db = self._load_coco_person_detection_results()\n gt_db = self._load_deepfashion2_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\" ground truth bbox and keypoints \"\"\"\n gt_db = []\n for index in self.image_set_index:\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n return gt_db\n\n def _load_coco_keypoint_annotation_kernal(self, index):\n \"\"\"\n coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']\n iscrowd:\n crowd instances are handled by marking their overlaps with all categories to -1\n and later excluded in training\n bbox:\n [x1, y1, w, h]\n :param index: coco image id\n :return: db entry\n \"\"\"\n im_ann = self.coco.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)\n objs = self.coco.loadAnns(annIds)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n x, y, w, h = obj['bbox']\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2-x1, y2-y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n rec = []\n for obj in objs:\n cls = self._coco_ind_to_class_ind[obj['category_id']]\n\n # ignore objs of not 
chosen class, or without keypoints annotation\n if cls not in self.select_cat or max(obj['keypoints']) == 0:\n continue\n self.cls_stat[cls] += 1\n self.sample_list_of_cls.append(cls)\n\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)\n for ipt in range(self.num_joints):\n joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = obj['keypoints'][ipt * 3 + 2]\n if t_vis > 1:\n t_vis = 1\n joints_3d_vis[ipt, 0] = t_vis\n joints_3d_vis[ipt, 1] = t_vis\n joints_3d_vis[ipt, 2] = 0\n\n center, scale = self._box2cs(obj['clean_bbox'][:4])\n rec.append({\n 'image': self.image_path_from_index(index),\n 'center': center,\n 'scale': scale,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'filename': '',\n 'imgnum': 0,\n 'category_id':obj['category_id'],\n 'area': obj['area']\n })\n return rec\n\n def _box2cs(self, box):\n x, y, w, h = box[:4]\n return self._xywh2cs(x, y, w, h)\n\n def _xywh2cs(self, x, y, w, h):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n\n if w > self.aspect_ratio * h:\n h = w * 1.0 / self.aspect_ratio\n elif w < self.aspect_ratio * h:\n w = h * self.aspect_ratio\n scale = np.array(\n [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n scale = scale * 1.25\n\n return center, scale\n\n def image_path_from_index(self, index):\n file_name = '%06d.jpg' % index\n file_name = os.path.join('image', file_name)\n\n image_path = os.path.join(self.root, self.image_set, file_name)\n\n return image_path\n\n def _load_deepfashion2_detection_results(self):\n all_boxes = None\n bbox_file_type = self.bbox_file.split('.')[-1]\n\n if bbox_file_type == 'json':\n with open(self.bbox_file, 'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n logger.error('=> Load %s fail!' % self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n # if det_res['category_id'] != 1:\n # continue\n img_name = self.image_path_from_index(det_res['image_id'])\n box = det_res['bbox']\n score = det_res['score']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n })\n elif bbox_file_type == 'pkl':\n logger.info(\"Loading detection results from %s ...\" % self.bbox_file)\n import pickle\n with open(self.bbox_file, 'rb') as f:\n raw_data = pickle.load(f)\n # all_boxes = self._process_pickle(raw_data)\n all_boxes = self._process_pickle(raw_data)\n if not all_boxes:\n logger.error('=> Load %s fail!' 
% self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n # if det_res['category_id'] != 1:\n # continue\n img_name = self.image_path_from_index(det_res['image_id'])\n box = det_res['bbox']\n score = det_res['score']\n category_id = det_res['category_id']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'category_id': det_res['category_id']\n })\n\n logger.info('=> Total boxes after fliter low score@{}: {}'.format(\n self.image_thre, num_boxes))\n return kpt_db\n\n def _process_pickle(self, data):\n all_boxes = []\n for n_img in range(len(data)):\n for i in range(13):\n for entry in data[n_img][0][i]:\n # print('img:%07d cat:%02d' % (n_img, i+1))\n # input()\n if entry is not None:\n x1, y1, x2, y2 = entry[0:4]\n w = x2 - x1\n h = y2 - y1\n box = dict()\n box['image_id'] = n_img + 1\n box['bbox'] = [x1, y1, w, h]\n box['score'] = entry[4]\n box['category_id'] = i + 1\n all_boxes.append(box)\n return all_boxes\n\n\n def evaluate(self, cfg, preds, output_dir, all_boxes, img_path,\n *args, **kwargs):\n rank = cfg.RANK\n\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n\n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_{}.json'.format(\n self.image_set, rank)\n )\n\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n _kpts.append({\n 'keypoints': kpt,\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'category_id': all_boxes[idx][6],\n 'image': int(img_path[idx][-10:-4])\n })\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n for img in kpts.keys():\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n # n_p['score'] = box_score\n\n if self.soft_nms:\n keep = soft_oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n else:\n keep = oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n # self._write_coco_keypoint_results(oks_nmsed_kpts, res_file)\n self._write_coco_keypoint_results_DeepFashion2(oks_nmsed_kpts, res_file)\n if 'test' not in self.image_set:\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder)\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n else:\n return {'Null': 0}, 0\n\n\n def _write_coco_keypoint_results_DeepFashion2(self, 
keypoints, res_file):\n results = self._coco_keypoint_results_all_category_kernel(keypoints)\n logger.info('=> writing results json to %s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n \n def _coco_keypoint_results_all_category_kernel(self, keypoints):\n # cat_id = data_pack['cat_id']\n\n keypoints = keypoints\n cat_results = []\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n for k in range(len(img_kpts)):\n image_id = img_kpts[k]['image']\n cat_id = img_kpts[k]['category_id']\n score = img_kpts[k]['score']\n\n _key_points = np.array([img_kpts[k]['keypoints']\n for k in range(len(img_kpts))])\n key_points = np.zeros(\n (_key_points.shape[0], self.num_joints * 3), dtype=np.float\n )\n\n for ipt in range(self.num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score.\n\n result = [\n {\n 'image_id': image_id,\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': score,\n # 'score': img_kpts[k]['score'],\n # 'center': list(img_kpts[k]['center']),\n # 'scale': list(img_kpts[k]['scale'])\n }\n ]\n cat_results.extend(result)\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[ind]))\n\n return info_str\n \n def get_channel_index(self, class_id):\n gt_class_keypoints_dict = {\n 1: (0, 25), 2: (25, 58), 3: (58, 89), 4: (89, 128), 5: (128, 143),\n 6: (143, 158), 7: (158, 168), 8: (168, 182), 9: (182, 190),\n 10: (190, 219), 11: (219, 256), 12: (256, 275), 13: (275, 294)}\n \n if isinstance(class_id, int):\n return list(range(gt_class_keypoints_dict[class_id]))\n elif isinstance(class_id, list):\n return [list(range(gt_class_keypoints_dict[i])) for i in class_id]\n\n def generate_target(self, joints, joints_vis):\n '''\n :param joints: [num_joints, 3]\n :param joints_vis: [num_joints, 3]\n :return: target, target_weight(1: visible, 0: invisible)\n '''\n # target_weight = np.ones((self.num_joints, 1), dtype=np.float32)\n target_weight = np.zeros((self.num_joints, 1), dtype=np.float32)\n target_weight[:, 0] = joints_vis[:, 0]\n feat_stride = self.image_size / self.heatmap_size\n\n if self.target_type == 'gaussian':\n target = np.zeros((self.num_joints,\n self.heatmap_size[1],\n self.heatmap_size[0]),\n dtype=np.float32)\n\n tmp_size = self.sigma * 3\n\n for joint_id in range(self.num_joints):\n mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\n mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\n # Check that any part of the gaussian is in-bounds\n ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\n br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \\\n or br[0] < 0 or br[1] < 0:\n # If not, just return the image as is\n 
target_weight[joint_id] = 0\n continue\n\n # # Generate gaussian\n size = 2 * tmp_size + 1\n x = np.arange(0, size, 1, np.float32)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n # The gaussian is not normalized, we want the center value to equal 1\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))\n\n # Usable gaussian range\n g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]\n g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]\n # Image range\n img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])\n img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])\n\n v = target_weight[joint_id]\n if v > 0.5:\n target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \\\n g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n elif self.target_type == 'coordinate':\n target = joints[:, 0:2]\n target /= feat_stride\n else:\n raise NotImplementedError('Only support gaussian map and coordinate now!')\n\n if self.use_different_joints_weight:\n target_weight = np.multiply(target_weight, self.joints_weight)\n\n return target, target_weight\n" ]
[ [ "numpy.multiply", "numpy.arange", "numpy.ones", "numpy.max", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
shamanDevel/fV-SRN
[ "966926ee678a0db0f1c67661537c4bb7eec0c56f" ]
[ "applications/volnet/eval_world_DensityVsColorGrid_NoImportance.py" ]
[ "\"\"\"\nScript to evaluate the network quality if it is trained for densities or for colors.\nThey are tested with world-space training and the best configuration from eval_network_configs\n\"\"\"\n\nimport numpy as np\nimport sys\nimport os\nimport subprocess\nimport itertools\nimport imageio\nimport json\nimport torch\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker\n\nBEST_ACTIVATION = \"SnakeAlt:1\"\nBEST_NETWORK = (32,4)\nGRID_RESOLUTION = 32\nGRID_CHANNELS = 16\n\nCONFIG_FILES = [\n\"config-files/plumeEnsemble-v0-dvr.json\",\n\"config-files/plumeEnsemble-v1-dvr.json\",\n\"config-files/plumeEnsemble-v2-dvr.json\",\n]\nVOLUME_FILES = \"volumes/ScalarFlow/sim_{ensemble:06d}/volume_{time:06d}.cvol\"\nVOLUME_TIMESTEP = 100\n\ndef main():\n configs = collect_configurations()\n train(configs)\n statistics_file = eval(configs)\n make_plots(statistics_file)\n\ndef collect_configurations():\n return [\n (\"density\", 0),\n (\"rgbo\", 0),\n (\"rgbo\", 1),\n (\"rgbo\", 2),\n ]\n\ndef get_args_and_hdf5_file(output_mode, config_file_index:int):\n \"\"\"\n Assembles the command line arguments for training and the filename for the hdf5-file\n with the results\n :param activation: the activation function name\n :param network: the network combination (channels, layers)\n :return: args, filename\n \"\"\"\n output_name = \"run_%s_%d\"%(output_mode, config_file_index)\n parameters = [\n sys.executable, \"volnet/train_volnet.py\",\n CONFIG_FILES[config_file_index],\n \"--volume_filenames\",\n VOLUME_FILES,\n \"--time_keyframes\", f\"{VOLUME_TIMESTEP}:{VOLUME_TIMESTEP+1}:1\",\n \"--time_train\", f\"{VOLUME_TIMESTEP}:{VOLUME_TIMESTEP+1}:1\",\n \"--time_val\", f\"{VOLUME_TIMESTEP}:{VOLUME_TIMESTEP+1}:1\",\n \"--train:mode\", \"world\",\n \"--train:samples\", \"256**3\",\n \"--train:batchsize\", \"64*64*128\",\n \"--val:copy_and_split\",\n \"--outputmode\", \"%s:direct\"%output_mode,\n \"--lossmode\", \"%s\"%output_mode,\n \"-l1\", \"1\",\n \"--lr_step\", \"100\",\n \"-i\", \"200\",\n '--fouriercount', str((BEST_NETWORK[0]-4)//2), '--fourierstd', '1.0',\n \"--activation\", BEST_ACTIVATION,\n \"--layers\", ':'.join([str(BEST_NETWORK[0])]*(BEST_NETWORK[1]-1)),\n \"--volumetric_features_resolution\", str(GRID_RESOLUTION),\n \"--volumetric_features_channels\", str(GRID_CHANNELS),\n \"--logdir\", 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/log',\n \"--modeldir\", 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/model',\n \"--hdf5dir\", 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/hdf5',\n '--name', output_name,\n '--save_frequency', '50'\n ]\n hdf5_file = 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/hdf5/' + output_name + \".hdf5\"\n return parameters, hdf5_file\n\ndef train(configs):\n print(\"Configurations:\", len(configs))\n for output_mode, config_file_index in configs:\n args, filename = get_args_and_hdf5_file(output_mode, config_file_index)\n if os.path.exists(filename):\n print(\"Skipping test\", filename)\n else:\n print(\"\\n=====================================\\nRun\", filename)\n subprocess.run(args, check=True)\n print(\"\\n===========================================\\nDONE!\")\n\ndef eval(configs):\n print(\"Evaluate\")\n statistics_file = 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/stats.json'\n if os.path.exists(statistics_file):\n print(\"Statistics file already exists!\")\n return statistics_file\n\n import common.utils as utils\n import pyrenderer\n from volnet.inference import LoadedModel\n from 
losses.lossbuilder import LossBuilder\n\n num_cameras = 64\n width = 512\n height = 512\n stepsize = 1 / 512\n timer = pyrenderer.GPUTimer()\n\n output_stats = []\n device = torch.device('cuda')\n ssim_loss = LossBuilder(device).ssim_loss(4)\n lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)\n\n def compute_stats(ln, mode, reference_images, filename_template=None, do_ssim=False, do_lpips=False):\n timingsX = []\n ssimX = []\n lpipsX = []\n for i in range(num_cameras):\n current_image = ln.render_network(\n cameras[i], width, height, mode,\n stepsize, timer=timer, timestep=VOLUME_TIMESTEP)\n if i>0:\n timingsX.append(timer.elapsed_milliseconds())\n if filename_template is not None:\n imageio.imwrite(\n filename_template%i,\n LoadedModel.convert_image(current_image))\n if do_ssim:\n ssimX.append(ssim_loss(current_image, reference_images[i]).item())\n if do_lpips:\n lpipsX.append(lpips_loss(current_image, reference_images[i]).item())\n return \\\n (np.mean(timingsX), np.std(timingsX)), \\\n (np.mean(ssimX), np.std(ssimX)) if do_ssim else (np.NaN, np.NaN), \\\n (np.mean(lpipsX), np.std(lpipsX)) if do_lpips else (np.NaN, np.NaN)\n\n # load networks\n def load_and_save(output_mode, index):\n _, filename = get_args_and_hdf5_file(output_mode, index)\n ln = LoadedModel(filename)\n ln.save_compiled_network(filename.replace('.hdf5', '.volnet'))\n return ln\n ln_density = load_and_save('density', 0)\n ln_colors = []\n for i in range(len(CONFIG_FILES)):\n ln_colors.append(load_and_save('rgbo', i))\n\n for cfg_idx, cfg in enumerate(CONFIG_FILES):\n image_folder = 'volnet/results/eval_world_DensityVsColorGrid_NoImportance/images_%d/'%(cfg_idx)\n os.makedirs(image_folder, exist_ok=True)\n local_stats = {\n 'cfg_index': cfg_idx,\n 'cfg': cfg}\n # render reference\n print(cfg)\n print(\"\\n===================================== Render reference\")\n ln = ln_colors[cfg_idx]\n cameras = ln.get_rotation_cameras(num_cameras)\n reference_images = [None] * num_cameras\n for i in range(num_cameras):\n reference_images[i] = ln.render_reference(cameras[i], width, height,\n timestep=VOLUME_TIMESTEP)\n imageio.imwrite(\n os.path.join(image_folder, 'reference%03d.png' % i),\n LoadedModel.convert_image(reference_images[i]))\n\n # render color network\n time, ssim, lpips = compute_stats(\n ln, LoadedModel.EvaluationMode.TENSORCORES_MIXED, reference_images,\n os.path.join(image_folder, 'color%03d.png'),\n True, True)\n local_stats['color-time'] = time\n local_stats['color-ssim'] = ssim\n local_stats['color-lpips'] = lpips\n\n # render density network\n ln.set_network_pytorch(*ln_density.get_network_pytorch())\n ln.set_network_tensorcores(ln_density.get_network_tensorcores())\n time, ssim, lpips = compute_stats(\n ln, LoadedModel.EvaluationMode.TENSORCORES_MIXED, reference_images,\n os.path.join(image_folder, 'density%03d.png'),\n True, True)\n local_stats['density-time'] = time\n local_stats['density-ssim'] = ssim\n local_stats['density-lpips'] = lpips\n\n output_stats.append(local_stats)\n\n # save statistics\n print(\"\\n===================================== Done, save statistics\")\n with open(statistics_file, \"w\") as f:\n json.dump(output_stats, f)\n return statistics_file\n\ndef make_plots(statistics_file):\n print(\"\\n===================================== Make Plots\")\n with open(statistics_file, \"r\") as f:\n stats = json.load(f)\n output_folder = os.path.split(statistics_file)[0]\n FILETYPE = \"eps\"\n\n numCols = len(stats)\n numClasses = 2\n fig, axs = plt.subplots(1, 2, sharex=True, figsize=(6.4, 
3.0))\n x_offset = np.linspace(-0.2, +0.2, numClasses, True)\n width = x_offset[1] - x_offset[0]\n handles = []\n for ax, stat, label in zip(axs, [\"ssim\", \"lpips\"], ['SSIM $\\\\uparrow$', 'LPIPS $\\\\downarrow$']):\n for i, cls in enumerate([\"density\", \"color\"]):\n X = []\n Y = []\n err = []\n for j in range(numCols):\n X.append(j+x_offset[i])\n y,e = stats[j][\"%s-%s\"%(cls, stat)]\n Y.append(y)\n err.append(e)\n h = ax.bar(X, Y, width=width, yerr=err)\n ax.bar_label(h, label_type='center', fmt=\"%.3f\", fontsize=7)\n if stat=='ssim':\n handles.append(h)\n ax.set_title(label)\n ax.set_xticks(np.arange(numCols))\n ax.set_xticklabels([\"TF %d\"%(i+1) for i in range(numCols)])\n\n lgd = fig.legend(\n handles, [\"Density\", \"Color\"],\n bbox_to_anchor=(0.65, 0.7), loc='lower center', borderaxespad=0.)\n fig.savefig(os.path.join(output_folder, 'DensityVsColorGrid-NoImportance-SSIM.%s'%FILETYPE),\n bbox_inches='tight', bbox_extra_artists=(lgd,))\n\n print(\"Done\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.linspace", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.std", "numpy.mean", "torch.device", "matplotlib.pyplot.show" ] ]
duducosmos/blockmatching
[ "c37949cce79b58ae658d5814ba3cff906dc4ab4e" ]
[ "src/blockmatching/blockmatching.py" ]
[ "#!/usr/bin/env python3.6\n# -*- Coding: UTF-8 -*-\n\"\"\"\nBlock Matching Algorithm.\n------------------------\nAccording to [cuevs2013]_ in a block matching (BM) approach:\n\n '...image frames in a video sequence are divided into blocks. For each\n block in the current frame, the best matching block is identified inside a\n region of the previous frame, aiming to minimize the sum of absolute\n differences...'\n\nFrom the work of [perez2010]_:\n\n '...pixel-specific motion vectors are determined by calculating the RMSE of\n the difference between two consecutive Kt*grids surrounding the considered\n pixel when the second grid is advected in the direction of a motion vector.\n The selected motion vector corresponds to the lowest RMSE. This process is\n repeated for each image pixel, and each pixel is assigned an individual\n motion vector. Future images are obtained by displacing the current image\n pixels in the direction of their motion vector. Future images are\n subsequently smoothed by averaging each pixel with its 8 surrounding\n neighbors...'\n\nFor example, considering a image, in :math:`t_0 + k dt`, with 9x9 pixels and a\nblock grid with 3x3 pixels. The image bellow it is assumed that the central\npixel C is surrounding by pixels A.\n\n::\n\n * * * * * * * * *\n * * * * * * * * *\n * * * * * * * * *\n * * * A A A * * *\n * * * A C A * * *\n * * * A A A * * *\n * * * * * * * * *\n * * * * * * * * *\n * * * * * * * * *\n\n\nNow, for a image in time :math:`t_0 + (k+1)dt`, the value of block with the pixel C,\nin the image in :math:`t_0 +kdt`, is compared with values of piexls in the 9x9 window\nof the image in :math:`t_0+(k+1)dt`.\n\nThe most probable direction of the moviment of the pixel C, at\n:math:`t_0 + (k+1)dt`, is given by the position of the corresponding block with\nthe lowest square mean error -SME (subtraction of the 3x3 subgrid)\n(e.g. [khawase17]_).\n\nIn the following example, the 3x3 block was in the initial position i=4, j=4.\nThe new initial subblock with lowest is in i=7, j=7.\n\nInitial position of 3x3 block in :math:`t_0 + kdt`:\n\n::\n\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * A A A * * *\n* * * A C A * * *\n* * * A A A * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n\nThe new position of 3x3 block in :math:`t_0 + (i+1)dt`:\n\n::\n\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * * * *\n* * * * * * A A A\n* * * * * * A C A\n* * * * * * A A A\n\nThe size of search window depend of the expected velocity of the block. For\nslow moviment, a window with size of 3 times can be considered.\n\n:Example:\n\n>>> import cv2\n>>> from blockmatching import *\n>>> cap = cv2.VideoCapture('./videos/car.mp4')\n>>> started = False\n>>> old_frame = None\n>>> while cap.isOpened():\n>>> ret, frame = cap.read()\n>>> if ret == True:\n>>> frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n>>> if started is False:\n>>> old_frame = frame\n>>> started = True\n>>> else:\n>>> XP, YP, XD, YD = block_matching(old_frame, frame,\n>>> width, height)\n>>> old_frame = frame\n>>> else:\n>>> break\n>>>\n>>> cap.release()\n\nLicense\n-------\nDeveloped by: E. S. Pereira.\ne-mail: [email protected]\n\nCopyright [2019] [E. S. 
Pereira]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nReferences\n----------\n.. [cuevs2013] CUEVAS, Erik et al. Block matching algorithm for motion\nestimation based on Artificial Bee Colony (ABC).\nApplied Soft Computing, v. 13, n. 6, p. 3047-3059, 2013.\n\n.. [khawase17] KHAWASE, Sonam T. et al. An Overview of Block Matching\nAlgorithms for Motion Vector Estimation. In: Proceedings of the Second\nInternational Conference on Research in Intelligent and Computing in\nEngineering, str. 2017. p. 217-222.\n\n.. [perez2010] Perez, R. et al. Validation of short and medium term\noperational solar radiation forecasts in the US. Solar Energy,\n84. 12. 2161-2172. 2010.\n\"\"\"\n\nfrom numba import jitclass\nfrom numba import njit, prange, jit, float64, int64, uint8\n\nfrom numpy import zeros, sqrt, array\nimport cv2\n\n\nTYPESSME = \"float64[:](float64[:,:], float64[:,:], int64, int64)\"\n\nTYPESME = \"float64[:](float64[:,:], float64[:,:], int64, int64, int64, int64,\"\nTYPESME += \"int64, int64, int64, int64, int64, int64)\"\n\nTYPEBBMATCHING = \"int64[:,:](float64[:,:], float64[:,:], int64, int64, int64,\"\nTYPEBBMATCHING += \"int64, int64, int64 ,int64, int64,)\"\n\nTYPEBMATCHING = \"int64[:,:], int64[:,:], int64[:,:], int64[:,:](float64[:,:],\"\nTYPEBMATCHING += \"float64[:,:], int64, int64)\"\n\n\nclass BlockError(Exception):\n def __init__(self, value):\n value = value\n\n def __str__(self):\n return repr(value)\n\n\nclass ImageSizeError(Exception):\n def __init__(self, value):\n value = value\n\n def __str__(self):\n return repr(value)\n\n\n@njit(TYPESSME, nogil=True)\ndef _ssme(window, block, height, width):\n r'''\n Compare the block in the window.\n input:\n window - 2d array representing the window\n blcok - 2d array representing the block to match\n height - int64 - height of block\n width - int64 - height of block\n '''\n out = zeros(3)\n minval = 1e99\n if block.shape[0] != height or block.shape[1] != width:\n out[0] = height // 2\n out[1] = width // 2\n return out\n\n for ki in prange(height):\n for kj in prange(width):\n tmp = (window[ki: ki + height, kj: kj + width] - block).flatten()\n diff = 0\n for val in list(tmp):\n if val < 0:\n val = 0\n if val > 255:\n val = 255\n diff += val\n diff = diff / (height * width)\n\n if ki == height // 2 and kj == width // 2:\n\n # To Priorize the central region of window.\n # Neighbors can have same value and false moviment is detected.\n # To avoid, a diferent approach for center of window.\n if diff <= minval:\n minval = diff\n out[0] = ki\n out[1] = kj\n out[2] = minval\n\n if(diff < minval):\n minval = diff\n out[0] = ki\n out[1] = kj\n out[2] = minval\n\n return out\n\n\n@njit(TYPESME, nogil=True)\ndef _sme(img1, img0, wblock, hblock, wwind, hwind,\n width, height, i0, i1, j0, j1):\n '''\n Create window and blocks to matching searching.\n input:\n img0 - 2d float64 array - image in time t + idt\n img1 - 2d float64 array - image in time t + (i +1 )dt\n wblock - int64 - Number of blocks in width\n hblock - int64 - Number of blocks in height\n 
wwind - int64 - Window width\n hwind - int64 - Window height\n width - int64 - block width in pixels\n height - int64 - block height in pixels\n i0 - int64 - initial line of window\n i1 - int64 - final line of window\n j0 - int64 - initial column of window\n j1 - int64 - final column of window\n Return:\n Integer array with four elements:\n inital line, initial column,\n final line, final column.\n '''\n out = zeros(4)\n\n window = img1[i0: i1, j0: j1]\n\n jb0 = j0 + width // 2\n jb1 = j0 + 3 * width // 2\n\n ib0 = i0 + height // 2\n ib1 = i0 + 3 * height // 2\n block = img0[ib0: ib1, jb0: jb1]\n\n tmp = _ssme(window, block, height, width)\n out[0] = ib0\n out[1] = jb0\n out[2] = i0 + tmp[0]\n out[3] = j0 + tmp[1]\n return out\n\n\n@njit(TYPEBBMATCHING, parallel=True, nogil=True)\ndef _block_matching(img0, img1, width, height, wblock, hblock,\n wwind, hwind, wwindt, hwindt):\n \"\"\"\n Divide image in windows to run in parallel mode.\n input:\n img0 - 2d float64 array - image in time t + idt\n img1 - 2d float64 array - image in time t + (i +1 )dt\n width - int64 - block width in pixels\n height - int64 - block height in pixels\n wblock - int64 - Number of blocks in width\n hblock - int64 - Number of blocks in height\n wwind - int64 - Window width\n hwind - int64 - Window height\n wwindt - int64 - Total number of windows in width\n hwindt - int64 - Total number of windows in height\n Return:\n Array with nxm lines and 4 collumns.\n Each collumn:\n 0 - Initial x\n 1 - Initial y\n 2 - Final matching x\n 3 - Final matching y\n \"\"\"\n m = hblock * wblock\n out = zeros((m, 4), dtype=int64)\n for i in prange(hblock - 1):\n for j in prange(wblock - 1):\n i0 = i * (height)\n i1 = (i + 1) * (hwind)\n\n j0 = j * (width)\n j1 = (j + 1) * (wwind)\n\n x = _sme(img1, img0, wblock, hblock, wwind, hwind,\n width, height, i0, i1, j0, j1)\n\n k0 = i * wblock + j\n out[k0, :] = x\n return out\n\n\ndef block_matching(img0, img1, width, height):\n \"\"\"\n Block matching algorithm.\n -------------------------\n Motion estimation from two sequential images.\n\n :Example:\n\n >>> import cv2\n >>> from blockmatching import *\n >>> cap = cv2.VideoCapture('./videos/car.mp4')\n >>> started = False\n >>> old_frame = None\n >>> while cap.isOpened():\n >>> ret, frame = cap.read()\n >>> if ret == True:\n >>> frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n >>> if started is False:\n >>> old_frame = frame\n >>> started = True\n >>> else:\n >>> XP, YP, XD, YD = block_matching(old_frame, frame,\n >>> width, height)\n >>> old_frame = frame\n >>> else:\n >>> break\n >>>\n >>> cap.release()\n\n Parameters\n ----------\n :parameter 2d_array img0: 2d array - Image in time t0 + kdt\n :parameter 2d_array img1: 2d array - Image in time t0 + (k+1)dt\n :parameter int64 width: int64 - matching block width\n :parameter int64 height: int64 - matching block height\n\n Return:\n ------\n :return 2d_array XI: - 2d int64 array - Grid with Initial x\n :return 2d_array YI: - 2d int64 array - Grid with Initial y\n :return 2d_array XF: - 2d int64 array - Final matching Grid with x\n :return 2d_array YF: - 2d int64 array - Final matching Grid with y\n \"\"\"\n\n if img0.shape[0] != img1.shape[0]:\n raise ImageSizeError(\"The images have diferent number of lines\")\n\n if img0.shape[1] != img1.shape[1]:\n raise ImageSizeError(\"The images have diferent number of columns\")\n\n lins, cols = img0.shape\n\n # Number of blocks in width\n wblock = cols // width\n\n # Number of blocks in height\n hblock = lins // height\n\n if wblock < 2:\n raise 
BlockError(\"block larger than image.\")\n\n if hblock < 2:\n raise BlockError(\"block heigher than image.\")\n\n # Window width\n wwind = 2 * width\n\n # Window height\n hwind = 2 * height\n\n # Total windows in width\n wwindt = cols // wwind\n\n # Total windows in height\n hwindt = lins // hwind\n out = _block_matching(img0.astype(float), img1.astype(float), width,\n height, wblock, hblock, wwind, hwind, wwindt, hwindt\n )\n\n return out[:, 0].reshape(hblock, wblock), out[:, 1].reshape(hblock, wblock),\\\n out[:, 2].reshape(hblock, wblock), out[:, 3].reshape(hblock, wblock),\n" ]
[ [ "numpy.zeros" ] ]
Richard-cpu2333/tx2dl
[ "985d9f9f24004271e85745a49252ab9922aec655" ]
[ "vision/ssd/config/vgg_ssd_config.py" ]
[ "import numpy as np\n\nfrom vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors\n\n\nimage_size = 300\nimage_mean = np.array([123, 117, 104]) # RGB layout\nimage_std = 1.0\n\niou_threshold = 0.75\ncenter_variance = 0.1\nsize_variance = 0.2\n\nspecs = [\n SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]),\n SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),\n SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),\n SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),\n SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),\n SSDSpec(1, 300, SSDBoxSizes(264, 315), [2])\n]\n\n\npriors = generate_ssd_priors(specs, image_size)" ]
[ [ "numpy.array" ] ]
thailengthol/PyAEZ
[ "1ff1456c27fd8742a85114a5a538796bc249c8d9" ]
[ "code/SoilConstraints.py" ]
[ "\"\"\"\r\nPyAEZ\r\nWritten by N. Lakmal Deshapriya\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport csv\r\n\r\nimport ALL_REDUCTION_FACTORS_IRR as crop_P_IRR\r\nimport ALL_REDUCTION_FACTORS_RAIN as crop_P_RAIN\r\n\r\nclass SoilConstraints(object):\r\n\r\n '''functions for soil qualities'''\r\n\r\n def soil_qty_1_top(self, TXT_val, OC_val, pH_val, TEB_val):\r\n\r\n TXT_intp = self.crop_P.TXT_factor[self.crop_P.TXT_value.index(TXT_val)]/100\r\n pH_intp = np.interp(pH_val, self.crop_P.pH_value, self.crop_P.pH_factor)/100\r\n OC_intp = np.interp(OC_val, self.crop_P.OC_value, self.crop_P.OC_factor)/100\r\n TEB_intp = np.interp(TEB_val, self.crop_P.TEB_value, self.crop_P.TEB_factor)/100\r\n\r\n min_factor = np.min([TXT_intp, pH_intp, OC_intp, TEB_intp])\r\n final_factor = (min_factor + (np.sum([TXT_intp, pH_intp, OC_intp, TEB_intp]) - min_factor)/3)/2\r\n\r\n return final_factor\r\n\r\n def soil_qty_1_sub(self, TXT_val, pH_val, TEB_val):\r\n\r\n TXT_intp = self.crop_P.TXT_factor[self.crop_P.TXT_value.index(TXT_val)]/100\r\n pH_intp = np.interp(pH_val, self.crop_P.pH_value, self.crop_P.pH_factor)/100\r\n TEB_intp = np.interp(TEB_val, self.crop_P.TEB_value, self.crop_P.TEB_factor)/100\r\n\r\n min_factor = np.min([TXT_intp, pH_intp, TEB_intp])\r\n final_factor = (min_factor + (np.sum([TXT_intp, pH_intp, TEB_intp]) - min_factor)/2)/2\r\n\r\n return final_factor\r\n\r\n def soil_qty_2_top(self, TXT_val, BS_val, CECsoil_val):\r\n\r\n TXT_intp = self.crop_P.TXT_factor[self.crop_P.TXT_value.index(TXT_val)]/100\r\n BS_intp = np.interp(BS_val, self.crop_P.BS_value, self.crop_P.BS_factor)/100\r\n CECsoil_intp = np.interp(CECsoil_val, self.crop_P.CECsoil_value, self.crop_P.CECsoil_factor)/100\r\n\r\n min_factor = np.min([TXT_intp, BS_intp, CECsoil_intp])\r\n final_factor = (min_factor + (np.sum([TXT_intp, BS_intp, CECsoil_intp]) - min_factor)/2)/2\r\n\r\n return final_factor\r\n\r\n def soil_qty_2_sub(self, TXT_val, BS_val, CECclay_val, pH_val):\r\n\r\n TXT_intp = self.crop_P.TXT_factor[self.crop_P.TXT_value.index(TXT_val)]/100\r\n BS_intp = np.interp(BS_val, self.crop_P.BS_value, self.crop_P.BS_factor)/100\r\n CECclay_intp = np.interp(CECclay_val, self.crop_P.CECclay_value, self.crop_P.CECclay_factor)/100\r\n pH_intp = np.interp(pH_val, self.crop_P.pH_value, self.crop_P.pH_factor)/100\r\n\r\n min_factor = np.min([TXT_intp, BS_intp, CECclay_intp, pH_intp])\r\n final_factor = (min_factor + (np.sum([TXT_intp, BS_intp, CECclay_intp, pH_intp]) - min_factor)/3)/2\r\n\r\n return final_factor\r\n\r\n def soil_qty_3_top_sub(self, RSD_val, SPR_val, SPH_val, OSD_val):\r\n\r\n RSD_intp = np.interp(RSD_val, self.crop_P.RSD_value, self.crop_P.RSD_factor)/100\r\n SPR_intp = np.interp(SPR_val, self.crop_P.SPR_value, self.crop_P.SPR_factor)/100\r\n SPH_intp = self.crop_P.SPH3_factor[self.crop_P.SPH3_value.index(SPH_val)]/100\r\n OSD_intp = np.interp(OSD_val, self.crop_P.OSD_value, self.crop_P.OSD_factor)/100\r\n\r\n final_factor = RSD_intp * np.min([SPR_intp, SPH_intp, OSD_intp])\r\n\r\n return final_factor\r\n\r\n def soil_qty_4_top_sub(self, DRG_val, SPH_val):\r\n\r\n DRG_intp = self.crop_P.DRG_factor[self.crop_P.DRG_value.index(DRG_val)]/100\r\n SPH_intp = self.crop_P.SPH4_factor[self.crop_P.SPH4_value.index(SPH_val)]/100\r\n\r\n final_factor = np.min([DRG_intp, SPH_intp])\r\n\r\n return final_factor\r\n\r\n def soil_qty_5_top_sub(self, ESP_val, EC_val, SPH_val):\r\n\r\n ESP_intp = np.interp(ESP_val, self.crop_P.ESP_value, self.crop_P.ESP_factor)/100\r\n EC_intp = np.interp(EC_val, self.crop_P.EC_value, 
self.crop_P.EC_factor)/100\r\n SPH_intp = self.crop_P.SPH5_factor[self.crop_P.SPH5_value.index(SPH_val)]/100\r\n\r\n final_factor = np.min([ESP_intp*EC_intp, SPH_intp])\r\n\r\n return final_factor\r\n\r\n def soil_qty_6_top_sub(self, CCB_val, GYP_val, SPH_val):\r\n\r\n CCB_intp = np.interp(CCB_val, self.crop_P.CCB_value, self.crop_P.CCB_factor)/100\r\n GYP_intp = np.interp(GYP_val, self.crop_P.GYP_value, self.crop_P.GYP_factor)/100\r\n SPH_intp = self.crop_P.SPH6_factor[self.crop_P.SPH6_value.index(SPH_val)]/100\r\n\r\n final_factor = np.min([CCB_intp*GYP_intp, SPH_intp])\r\n\r\n return final_factor\r\n\r\n def soil_qty_7_top_sub(self, RSD_val, GRC_val, SPH_val, TXT_val, VSP_val):\r\n\r\n RSD_intp = np.interp(RSD_val, self.crop_P.RSD_value, self.crop_P.RSD_factor)/100\r\n GRC_intp = np.interp(GRC_val, self.crop_P.GRC_value, self.crop_P.GRC_factor)/100\r\n SPH_intp = self.crop_P.SPH7_factor[self.crop_P.SPH7_value.index(SPH_val)]/100\r\n TXT_intp = self.crop_P.TXT_factor[self.crop_P.TXT_value.index(TXT_val)]/100\r\n VSP_intp = np.interp(VSP_val, self.crop_P.VSP_value, self.crop_P.VSP_factor)/100\r\n\r\n min_factor = np.min([RSD_intp, GRC_intp, SPH_intp, TXT_intp, VSP_intp])\r\n final_factor = (min_factor + (np.sum([RSD_intp, GRC_intp, SPH_intp, TXT_intp, VSP_intp]) - min_factor)/4)/2\r\n\r\n return final_factor\r\n\r\n def calculateSoilQualities(self, irr_or_rain, topsoil_path='./sample_data/input/soil_characteristics_topsoil.csv', subsoil_path='./sample_data/input/soil_characteristics_subsoil.csv'):\r\n\r\n if irr_or_rain == 'I':\r\n self.crop_P = crop_P_IRR\r\n elif irr_or_rain == 'R':\r\n self.crop_P = crop_P_RAIN\r\n\r\n # getting number of soil units\r\n file_temp = open(topsoil_path)\r\n numline = len(file_temp.readlines())\r\n\r\n self.sq_mat = np.zeros((numline-1,1+7)) # first column for soil unit code and other columns for SQs\r\n\r\n with open(topsoil_path) as csv_top:\r\n csv_sub = open(subsoil_path)\r\n top_reader = csv.reader(csv_top, delimiter=',')\r\n sub_reader = csv.reader(csv_sub, delimiter=',')\r\n\r\n row_count = 0\r\n name_list = []\r\n for row_top in top_reader:\r\n row_sub = next(sub_reader)\r\n\r\n row_count = row_count + 1\r\n if row_count == 1:\r\n name_list = row_top\r\n continue\r\n\r\n self.sq_mat[row_count-2, 0] = float(row_top[0])\r\n\r\n '''SQ 1'''\r\n\r\n TXT_val = row_top[name_list.index('TXT')]\r\n OC_val = float(row_top[name_list.index('OC')])\r\n pH_val = float(row_top[name_list.index('pH')])\r\n TEB_val = float(row_top[name_list.index('TEB')])\r\n sq1_top = self.soil_qty_1_top(TXT_val, OC_val, pH_val, TEB_val)\r\n\r\n TXT_val = row_sub[name_list.index('TXT')]\r\n pH_val = float(row_sub[name_list.index('pH')])\r\n TEB_val = float(row_sub[name_list.index('TEB')])\r\n sq1_sub = self.soil_qty_1_sub(TXT_val, pH_val, TEB_val)\r\n\r\n sq1 = (sq1_top + sq1_sub)/2\r\n\r\n self.sq_mat[row_count-2, 1] = sq1\r\n\r\n '''SQ 2'''\r\n\r\n TXT_val = row_top[name_list.index('TXT')]\r\n BS_val = float(row_top[name_list.index('BS')])\r\n CECsoil_val = float(row_top[name_list.index('CEC_soil')])\r\n sq2_top = self.soil_qty_2_top(TXT_val, BS_val, CECsoil_val)\r\n\r\n TXT_val = row_sub[name_list.index('TXT')]\r\n BS_val = float(row_sub[name_list.index('BS')])\r\n CECclay_val = float(row_sub[name_list.index('CEC_soil')])\r\n pH_val = float(row_sub[name_list.index('pH')])\r\n sq2_sub = self.soil_qty_2_sub(TXT_val, BS_val, CECclay_val, pH_val)\r\n\r\n sq2 = (sq2_top + sq2_sub)/2\r\n\r\n self.sq_mat[row_count-2, 2] = sq2\r\n\r\n '''SQ 3'''\r\n\r\n RSD_val = 
float(row_top[name_list.index('RSD')])\r\n SPR_val = float(row_top[name_list.index('SPR')])\r\n SPH_val = row_top[name_list.index('SPH')]\r\n OSD_val = float(row_top[name_list.index('OSD')])\r\n sq3_top = self.soil_qty_3_top_sub(RSD_val, SPR_val, SPH_val, OSD_val)\r\n\r\n RSD_val = float(row_sub[name_list.index('RSD')])\r\n SPR_val = float(row_sub[name_list.index('SPR')])\r\n SPH_val = row_sub[name_list.index('SPH')]\r\n OSD_val = float(row_sub[name_list.index('OSD')])\r\n sq3_sub = self.soil_qty_3_top_sub(RSD_val, SPR_val, SPH_val, OSD_val)\r\n\r\n sq3 = (sq3_top + sq3_sub)/2\r\n\r\n self.sq_mat[row_count-2, 3] = sq3\r\n\r\n '''SQ 4'''\r\n\r\n DRG_val = row_top[name_list.index('DRG')]\r\n SPH_val = row_top[name_list.index('SPH')]\r\n sq4_top = self.soil_qty_4_top_sub(DRG_val, SPH_val)\r\n\r\n DRG_val = row_sub[name_list.index('DRG')]\r\n SPH_val = row_sub[name_list.index('SPH')]\r\n sq4_sub = self.soil_qty_4_top_sub(DRG_val, SPH_val)\r\n\r\n sq4 = (sq4_top + sq4_sub)/2\r\n\r\n self.sq_mat[row_count-2, 4] = sq4\r\n\r\n '''SQ 5'''\r\n\r\n ESP_val = float(row_top[name_list.index('ESP')])\r\n EC_val = float(row_top[name_list.index('EC')])\r\n SPH_val = row_top[name_list.index('SPH')]\r\n sq5_top = self.soil_qty_5_top_sub(ESP_val, EC_val, SPH_val)\r\n\r\n ESP_val = float(row_sub[name_list.index('ESP')])\r\n EC_val = float(row_sub[name_list.index('EC')])\r\n SPH_val = row_sub[name_list.index('SPH')]\r\n sq5_sub = self.soil_qty_5_top_sub(ESP_val, EC_val, SPH_val)\r\n\r\n sq5 = (sq5_top + sq5_sub)/2\r\n\r\n self.sq_mat[row_count-2, 5] = sq5\r\n\r\n '''SQ 6'''\r\n\r\n CCB_val = float(row_top[name_list.index('CCB')])\r\n GYP_val = float(row_top[name_list.index('GYP')])\r\n SPH_val = row_top[name_list.index('SPH')]\r\n sq6_top = self.soil_qty_6_top_sub(CCB_val, GYP_val, SPH_val)\r\n\r\n CCB_val = float(row_sub[name_list.index('CCB')])\r\n GYP_val = float(row_sub[name_list.index('GYP')])\r\n SPH_val = row_sub[name_list.index('SPH')]\r\n sq6_sub = self.soil_qty_6_top_sub(CCB_val, GYP_val, SPH_val)\r\n\r\n sq6 = (sq6_top + sq6_sub)/2\r\n\r\n self.sq_mat[row_count-2, 6] = sq6\r\n\r\n '''SQ 7'''\r\n\r\n RSD_val = float(row_top[name_list.index('RSD')])\r\n GRC_val = float(row_top[name_list.index('GRC')])\r\n SPH_val = row_top[name_list.index('SPH')]\r\n TXT_val = row_top[name_list.index('TXT')]\r\n VSP_val = float(row_top[name_list.index('VSP')])\r\n sq7_top = self.soil_qty_7_top_sub(RSD_val, GRC_val, SPH_val, TXT_val, VSP_val)\r\n\r\n RSD_val = float(row_sub[name_list.index('RSD')])\r\n GRC_val = float(row_sub[name_list.index('GRC')])\r\n SPH_val = row_sub[name_list.index('SPH')]\r\n TXT_val = row_sub[name_list.index('TXT')]\r\n VSP_val = float(row_sub[name_list.index('VSP')])\r\n sq7_sub = self.soil_qty_7_top_sub(RSD_val, GRC_val, SPH_val, TXT_val, VSP_val)\r\n\r\n sq7 = (sq7_top + sq7_sub)/2\r\n\r\n self.sq_mat[row_count-2, 7] = sq7\r\n\r\n def calculateSoilRatings(self, input_level):\r\n\r\n self.SR = np.zeros((self.sq_mat.shape[0],2)) # first column for soil unit code and other column for SR\r\n self.SR[:,0] = self.sq_mat[:,0] # adding soil unit code\r\n\r\n for i1 in range(0, self.sq_mat.shape[0]):\r\n\r\n if input_level == 'L':\r\n min_factor = np.min([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]])\r\n fsq = (min_factor + (np.sum([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]]) - min_factor)/3)/2\r\n self.SR[i1,1] = self.sq_mat[i1][1] * self.sq_mat[i1][3] * fsq\r\n elif input_level == 'I':\r\n min_factor = 
np.min([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]])\r\n fsq = (min_factor + (np.sum([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]]) - min_factor)/3)/2\r\n self.SR[i1,1] = 0.5 * (self.sq_mat[i1][1]+self.sq_mat[i1][2]) * self.sq_mat[i1][3] * fsq\r\n elif input_level == 'H':\r\n min_factor = np.min([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]])\r\n fsq = (min_factor + (np.sum([self.sq_mat[i1][4], self.sq_mat[i1][5], self.sq_mat[i1][6], self.sq_mat[i1][7]]) - min_factor)/3)/2\r\n self.SR[i1,1] = self.sq_mat[i1][2] * self.sq_mat[i1][3] * fsq\r\n else:\r\n print('Wrong Input Level !')\r\n\r\n def getSoilQualities(self):\r\n return self.sq_mat\r\n\r\n def getSoilRatings(self):\r\n return self.SR\r\n\r\n def applySoilConstraints(self, soil_map, yield_in):\r\n\r\n yield_final = np.copy(yield_in)\r\n\r\n for i1 in range(0, self.SR.shape[0]):\r\n temp_idx = soil_map==self.SR[i1,0]\r\n yield_final[temp_idx] = yield_in[temp_idx] * self.SR[i1,1]\r\n\r\n return yield_final\r\n" ]
[ [ "numpy.min", "numpy.copy", "numpy.interp", "numpy.zeros", "numpy.sum" ] ]
locvdnguyen/python-training
[ "5a415e413d036e5305ec75879684a839fc4d2c86" ]
[ "exercise06/filehandler.py" ]
[ "\"\"\"\nExercise 06: Mock interview\nRequirements:\n 1/ Load data from .csv file as a list of dictionaries\n 2/ Group the data based on license number:\n US: first 6 characters are alphabetic,\n next 5 characters are numbers\n UK: first character is in alphabet, followed by hyphen,\n next 8 characters are numbers\n CA: first 3 characters are in alphabet,\n followed by a hyphen, 6 numbers, a hyphen,\n and 2 alphabetic characters\n 3/ Write unit testing\n\"\"\"\nimport logging\nimport re\nimport pandas as pd\n\n\nlogging.basicConfig(filename=\"log.txt\", level=logging.INFO)\n\n\ndef is_us(license_number: str) -> bool:\n \"\"\"\n US: first 6 characters are alphabetic,\n next 5 characters are numbers\n \"\"\"\n logging.info(\"Starting is_us(%s)...\", license_number)\n pattern = \"[a-zA-Z]{6}[0-9]{5}\"\n is_matched = re.match(pattern=pattern, string=license_number)\n\n return bool(is_matched) and len(license_number) == 11\n\n\ndef is_uk(license_number: str) -> bool:\n \"\"\"\n UK: first character is in alphabet, followed by hyphen,\n next 8 characters are numbers\n \"\"\"\n logging.info(\"Starting is_uk(%s)...\", license_number)\n pattern = \"[a-zA-Z]-[0-9]{8}\"\n is_matched = re.match(pattern=pattern, string=license_number)\n\n return bool(is_matched) and len(license_number) == 10\n\n\ndef is_ca(license_number: str) -> bool:\n \"\"\"\n CA: first 3 characters are in alphabet, followed by a hyphen,\n 6 numbers, a hyphen, and 2 alphabetic characters\n \"\"\"\n logging.info(\"Starting is_ca(%s)...\", license_number)\n pattern = \"[a-zA-Z]{3}-[0-9]{6}-[a-zA-Z]{2}\"\n is_matched = re.match(pattern=pattern, string=license_number)\n\n return bool(is_matched) and len(license_number) == 13\n\n\ndef get_license_number_group(license_number: str) -> str:\n \"\"\" get_license_number_group\n :param license_number:\n :return:\n \"\"\"\n logging.info(\"Start get_license_number_group(%s)...\", license_number)\n if is_us(license_number):\n return \"US\"\n if is_uk(license_number):\n return \"UK\"\n if is_ca(license_number):\n return \"CA\"\n return \"Unknown\"\n\n\nclass FileHandler:\n \"\"\"\n This class take responsibility in reading csv file and\n group the data based on the license_number\n \"\"\"\n def __init__(self):\n self.file_name = \"\"\n self.data = []\n\n def load_csv_as_dictionaries(self, file_name: str):\n \"\"\" load_csv_as_dictionaries\n :param file_name:\n :return:\n \"\"\"\n logging.info(\"Start load_csv_as_dictionaries...\")\n try:\n data_from_csv = pd.read_csv(file_name)\n self.data = data_from_csv.to_dict(orient='records')\n\n except FileNotFoundError as file_not_found_err:\n logging.exception(file_not_found_err)\n raise\n\n except Exception as exception:\n logging.error(exception)\n raise\n\n def group_by_license_number(self):\n \"\"\" group_by_license_number\n :return:\n \"\"\"\n logging.info(\"Start group_by_license_number()...\")\n groups = {'US': [], 'UK': [], 'CA': [], 'Unknown': []}\n for info in self.data:\n if \"license_number\" in info:\n license_number = info[\"license_number\"]\n group_code = get_license_number_group(license_number)\n groups[group_code].append(info)\n else:\n groups['Unknown'].append(info)\n return groups\n\n\ndef main():\n \"\"\" main function\n :return:\n \"\"\"\n file_handler = FileHandler()\n file_handler.load_csv_as_dictionaries(file_name=\"input.csv\")\n print(file_handler.data)\n print(file_handler.group_by_license_number())\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
broadinstitute/scRNA-Seq
[ "03aafb92274a97f4d634ac9e42f0e0feca91ed98" ]
[ "scCloud/scCloud/tools/visualization.py" ]
[ "import time\nimport numpy as np\nimport pandas as pd\nfrom subprocess import check_call\n\nimport os\nimport pkg_resources\n\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nfrom umap import UMAP\nfrom fitsne import FItSNE\n\n\nfrom . import calculate_affinity_matrix, calculate_nearest_neighbors, select_cells, construct_graph, net_train_and_predict\n\n\n\ndef calc_tsne(X, n_jobs, n_components, perplexity, early_exaggeration, learning_rate, random_state, init = 'random', n_iter = 1000, n_iter_early_exag = 250):\n\ttsne = TSNE(n_jobs = n_jobs, n_components = n_components, perplexity = perplexity, early_exaggeration = early_exaggeration, learning_rate = learning_rate, \\\n\t\trandom_state = random_state, verbose = 1, init = init, n_iter = n_iter, n_iter_early_exag = n_iter_early_exag)\n\tX_tsne = tsne.fit_transform(X)\n\tprint(\"Final error = {}\".format(tsne.kl_divergence_))\n\treturn X_tsne\n\n\ndef calc_fitsne(X, nthreads, no_dims, perplexity, early_exag_coeff, learning_rate, rand_seed, initialization = None, max_iter = 1000, stop_early_exag_iter = 250, mom_switch_iter = 250):\n\t# FItSNE will change X content\n\treturn FItSNE(X.astype('float64'), nthreads = nthreads, no_dims = no_dims, perplexity = perplexity, early_exag_coeff = early_exag_coeff, learning_rate = learning_rate, \\\n\t\trand_seed = rand_seed, initialization = initialization, max_iter = max_iter, stop_early_exag_iter = stop_early_exag_iter, mom_switch_iter = mom_switch_iter)\n\n\ndef calc_umap(X, n_components, n_neighbors, min_dist, spread, random_state, init = 'spectral', n_epochs = None, learning_rate = 1.0, knn_indices = None, knn_dists = None):\n\tumap = UMAP(n_components = n_components, n_neighbors = n_neighbors, min_dist = min_dist, spread = spread, random_state = random_state, \\\n\t\tinit = init, n_epochs = n_epochs, learning_rate = learning_rate, verbose = True)\n\treturn umap.fit_transform(X, knn_indices = knn_indices, knn_dists = knn_dists)\n\n\ndef calc_force_directed_layout(W, file_name, n_jobs, target_change_per_node, target_steps, is3d, memory, random_state, init = None):\n\tinput_graph_file = '{file_name}.net'.format(file_name = file_name)\n\tG = construct_graph(W)\n\tG.write(input_graph_file)\n\tprint(input_graph_file + ' is written.')\t\t\t\n\n\toutput_coord_file = '{file_name}.coords.txt'.format(file_name = file_name)\n\n\tclasspath = pkg_resources.resource_filename('scCloud', 'ext/forceatlas2.jar') + ':' + pkg_resources.resource_filename('scCloud', 'ext/gephi-toolkit-0.9.2-all.jar')\n\tcommand = ['java', '-Djava.awt.headless=true', '-Xmx{memory}g'.format(memory = memory), '-cp', classpath, \\\n\t\t\t\t'kco.forceatlas2.Main', '--input', input_graph_file, '--output', file_name + '.coords', \\\n\t\t\t\t'--nthreads', str(n_jobs), '--seed', str(random_state), '--targetChangePerNode', str(target_change_per_node), '--targetSteps', str(target_steps)]\n\tif not is3d:\n\t\tcommand.append('--2d')\n\n\tif init is not None:\n\t\tif not is3d:\n\t\t\tdf = pd.DataFrame(data = init, columns = ['x', 'y'], index = range(1, init.shape[0] + 1))\n\t\telse:\n\t\t\tdf = pd.DataFrame(data = init, columns = ['x', 'y', 'z'], index = range(1, init.shape[0] + 1))\n\t\tdf.index.name = 'id'\n\t\tinit_coord_file = '{file_name}.init.coords.txt'.format(file_name = file_name)\n\t\tdf.to_csv(init_coord_file, sep = '\\t', float_format = '%.2f')\n\t\tcommand.extend(['--coords', init_coord_file])\n\tprint(command)\n\tcheck_call(command)\n\tprint(\"Force-directed layout is generated.\")\n\n\tfle_coords = pd.read_csv(output_coord_file, 
header = 0, index_col = 0, sep = '\\t').values\n\tcheck_call(['rm', '-f', input_graph_file])\n\tcheck_call(['rm', '-f', output_coord_file])\n\n\treturn fle_coords\n\n\n\ndef run_tsne(data, rep_key, n_jobs, n_components = 2, perplexity = 30, early_exaggeration = 12, learning_rate = 1000, random_state = 0, out_basis = 'tsne'):\n\tstart = time.time()\n\tX_tsne = calc_tsne(data.obsm[rep_key], n_jobs, n_components, perplexity, early_exaggeration, learning_rate, random_state)\n\tdata.obsm['X_' + out_basis] = X_tsne\n\tend = time.time()\n\tprint(\"tSNE is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef run_fitsne(data, rep_key, n_jobs, n_components = 2, perplexity = 30, early_exaggeration = 12, learning_rate = 1000, random_state = 0, out_basis = 'fitsne'):\n\tstart = time.time()\n\tdata.obsm['X_' + out_basis] = calc_fitsne(data.obsm[rep_key], n_jobs, n_components, perplexity, early_exaggeration, learning_rate, random_state) \n\tend = time.time()\n\tprint(\"FItSNE is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef run_umap(data, rep_key, n_components = 2, n_neighbors = 15, min_dist = 0.5, spread = 1.0, random_state = 0, out_basis = 'umap'):\n\tstart = time.time()\n\tassert 'knn_indices' in data.uns\n\tknn_indices = np.insert(data.uns['knn_indices'][:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis = 1)\n\tknn_dists = np.insert(data.uns['knn_distances'][:, 0 : n_neighbors - 1], 0, 0.0, axis = 1)\n\tdata.obsm['X_' + out_basis] = calc_umap(data.obsm[rep_key], n_components, n_neighbors, min_dist, spread, random_state, knn_indices = knn_indices, knn_dists = knn_dists)\n\tend = time.time()\n\tprint(\"UMAP is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef run_force_directed_layout(data, file_name, n_jobs, K = 50, target_change_per_node = 2.0, target_steps = 10000, is3d = False, memory = 8, random_state = 0, out_basis = 'fle'):\n\tstart = time.time()\n\trealK = min(K - 1, data.uns['diffmap_knn_indices'].shape[1]) # K - 1: exclude self\n\tW = calculate_affinity_matrix(data.uns['diffmap_knn_indices'][:, 0:realK], data.uns['diffmap_knn_distances'][:, 0:realK])\n\tdata.obsm['X_' + out_basis] = calc_force_directed_layout(W, file_name, n_jobs, target_change_per_node, target_steps, is3d, memory, random_state);\n\tend = time.time()\n\tprint(\"Force-directed layout is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\n\ndef run_net_tsne(data, rep_key, selected, n_jobs, n_components = 2, perplexity = 30, early_exaggeration = 12, learning_rate = 1000, random_state = 0, net_alpha = 0.1, polish_learning_frac = 0.33, polish_n_iter = 150, out_basis = 'net_tsne'):\n\tstart = time.time()\n\tX = data.obsm[rep_key][selected,:]\n\tX_tsne = calc_tsne(X, n_jobs, n_components, perplexity, early_exaggeration, learning_rate, random_state)\n\n\tdata.uns['X_' + out_basis + '_small'] = X_tsne\n\tdata.obs['ds_selected'] = selected\n\n\tY_init = np.zeros((data.shape[0], 2), dtype = np.float64)\n\tY_init[selected,:] = X_tsne\n\tY_init[~selected,:] = net_train_and_predict(X, X_tsne, data.obsm[rep_key][~selected,:], net_alpha, random_state, verbose = True)\n\n\tdata.obsm['X_' + out_basis + '_pred'] = Y_init\n\n\tpolish_learning_rate = polish_learning_frac * data.shape[0]\n\tdata.obsm['X_' + out_basis] = calc_tsne(data.obsm[rep_key], n_jobs, n_components, perplexity, early_exaggeration, polish_learning_rate, random_state, \\\n\t\tinit = Y_init, n_iter = polish_n_iter, n_iter_early_exag = 0)\n\tend = time.time()\n\tprint(\"Net tSNE is calculated. 
Time spent = {:.2f}s.\".format(end - start))\n\n\ndef run_net_fitsne(data, rep_key, selected, n_jobs, n_components = 2, perplexity = 30, early_exaggeration = 12, learning_rate = 1000, random_state = 0, net_alpha = 0.1, polish_learning_frac = 0.5, polish_n_iter = 150, out_basis = 'net_fitsne'):\n\tstart = time.time()\n\tX = data.obsm[rep_key][selected,:]\n\tX_fitsne = calc_fitsne(X, n_jobs, n_components, perplexity, early_exaggeration, learning_rate, random_state)\n\n\tdata.uns['X_' + out_basis + '_small'] = X_fitsne\n\tdata.obs['ds_selected'] = selected\n\n\tY_init = np.zeros((data.shape[0], 2), dtype = np.float64)\n\tY_init[selected,:] = X_fitsne\n\tY_init[~selected,:] = net_train_and_predict(X, X_fitsne, data.obsm[rep_key][~selected,:], net_alpha, random_state, verbose = True)\n\n\tdata.obsm['X_' + out_basis + '_pred'] = Y_init\n\n\tpolish_learning_rate = polish_learning_frac * data.shape[0]\n\tdata.obsm['X_' + out_basis] = calc_fitsne(data.obsm[rep_key], n_jobs, n_components, perplexity, early_exaggeration, polish_learning_rate, random_state, \n\t\tinitialization = Y_init, max_iter = polish_n_iter, stop_early_exag_iter = 0, mom_switch_iter = 0)\n\tend = time.time()\n\tprint(\"Net FItSNE is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef run_net_umap(data, rep_key, selected, n_jobs, n_components = 2, n_neighbors = 15, min_dist = 0.1, spread = 1.0, random_state = 0, ds_full_speed = False, net_alpha = 0.1, polish_n_epochs = 30, polish_learning_rate = 10.0, out_basis = 'net_umap'):\n\tstart = time.time()\n\tX = data.obsm[rep_key][selected,:]\n\n\tif 'ds_knn_indices' not in data.uns:\n\t\tindices, distances = calculate_nearest_neighbors(X, n_jobs, K = n_neighbors, random_state = random_state, full_speed = ds_full_speed)\n\t\tdata.uns['ds_knn_indices'] = indices\n\t\tdata.uns['ds_knn_distances'] = distances\n\tknn_indices = np.insert(data.uns['ds_knn_indices'][:, 0 : n_neighbors - 1], 0, range(X.shape[0]), axis = 1)\n\tknn_dists = np.insert(data.uns['ds_knn_distances'][:, 0 : n_neighbors - 1], 0, 0.0, axis = 1)\n\n\tX_umap = calc_umap(X, n_components, n_neighbors, min_dist, spread, random_state, knn_indices = knn_indices, knn_dists = knn_dists)\n\n\tdata.uns['X_' + out_basis + '_small'] = X_umap\n\tdata.obs['ds_selected'] = selected\n\t\n\tY_init = np.zeros((data.shape[0], 2), dtype = np.float64)\n\tY_init[selected,:] = X_umap\n\tY_init[~selected,:] = net_train_and_predict(X, X_umap, data.obsm[rep_key][~selected,:], net_alpha, random_state, verbose = True)\n\n\tdata.obsm['X_' + out_basis + '_pred'] = Y_init\n\n\tassert 'knn_indices' in data.uns\n\tknn_indices = np.insert(data.uns['knn_indices'][:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis = 1)\n\tknn_dists = np.insert(data.uns['knn_distances'][:, 0 : n_neighbors - 1], 0, 0.0, axis = 1)\n\n\tdata.obsm['X_' + out_basis] = calc_umap(data.obsm[rep_key], n_components, n_neighbors, min_dist, spread, random_state, \\\n\t\tinit = Y_init, n_epochs = polish_n_epochs, learning_rate = polish_learning_rate, knn_indices = knn_indices, knn_dists = knn_dists)\n\tend = time.time()\n\tprint(\"Net UMAP is calculated. 
Time spent = {:.2f}s.\".format(end - start))\n\ndef run_net_fle(data, selected, file_name, n_jobs, K = 50, target_change_per_node = 2.0, target_steps = 10000, is3d = False, memory = 8, random_state = 0, \\\n\tds_full_speed = False, net_alpha = 0.1, polish_target_steps = 1500, out_basis = 'net_fle'):\n\tstart = time.time()\n\tX = data.obsm['X_diffmap'][selected,:]\n\t\n\tif 'diffmap_ds_knn_indices' in data.uns:\n\t\tindices = data.uns['diffmap_ds_knn_indices']\n\t\tdistances = data.uns['diffmap_ds_knn_distances']\n\telse:\n\t\tindices, distances = calculate_nearest_neighbors(X, n_jobs, K = K, random_state = random_state, full_speed = ds_full_speed)\n\t\tdata.uns['diffmap_ds_knn_indices'] = indices\n\t\tdata.uns['diffmap_ds_knn_distances'] = distances\n\n\tW = calculate_affinity_matrix(indices, distances)\n\tX_fle = calc_force_directed_layout(W, file_name + '.small', n_jobs, target_change_per_node, target_steps, is3d, memory, random_state);\n\n\tdata.uns['X_' + out_basis + '_small'] = X_fle\n\tdata.obs['ds_diffmap_selected'] = selected\n\n\tY_init = np.zeros((data.shape[0], 2), dtype = np.float64)\n\tY_init[selected,:] = X_fle\n\tY_init[~selected,:] = net_train_and_predict(X, X_fle, data.obsm['X_diffmap'][~selected,:], net_alpha, random_state, verbose = True)\n\n\tdata.obsm['X_' + out_basis + '_pred'] = Y_init\n\n\trealK = min(K - 1, data.uns['diffmap_knn_indices'].shape[1]) # K - 1: exclude self\n\tW = calculate_affinity_matrix(data.uns['diffmap_knn_indices'][:, 0:realK], data.uns['diffmap_knn_distances'][:, 0:realK])\n\tdata.obsm['X_' + out_basis] = calc_force_directed_layout(W, file_name, n_jobs, target_change_per_node, polish_target_steps, is3d, memory, random_state, init = Y_init);\n\tend = time.time()\n\n\tprint(\"Net FLE is calculated. Time spent = {:.2f}s.\".format(end - start))\n" ]
[ [ "pandas.read_csv", "numpy.zeros", "numpy.insert" ] ]
jtwhite79/flopy
[ "41302901e4db38455c28a68153b49a8466da3027" ]
[ "flopy/utils/datautil.py" ]
[ "import os\nimport numpy as np\n\n\ndef clean_name(name):\n # remove bad characters\n clean_string = name.replace(\" \", \"_\")\n clean_string = clean_string.replace(\"-\", \"_\")\n # remove anything after a parenthesis\n index = clean_string.find(\"(\")\n if index != -1:\n clean_string = clean_string[0:index]\n return clean_string\n\n\ndef find_keyword(arr_line, keyword_dict):\n # convert to lower case\n arr_line_lower = []\n for word in arr_line:\n # integers and floats are not keywords\n if not DatumUtil.is_int(word) and not DatumUtil.is_float(word):\n arr_line_lower.append(word.lower())\n # look for constants in order of most words to least words\n key = \"\"\n for num_words in range(len(arr_line_lower), -1, -1):\n key = tuple(arr_line_lower[0:num_words])\n if len(key) > 0 and key in keyword_dict:\n return key\n return None\n\n\ndef max_tuple_abs_size(some_tuple):\n max_size = 0\n for item in some_tuple:\n item_abs = abs(item)\n if item_abs > max_size:\n max_size = item_abs\n return max_size\n\n\nclass DatumUtil:\n @staticmethod\n def is_int(str):\n try:\n int(str)\n return True\n except TypeError:\n return False\n except ValueError:\n return False\n\n @staticmethod\n def is_float(str):\n try:\n float(str)\n return True\n except TypeError:\n return False\n except ValueError:\n return False\n\n @staticmethod\n def is_basic_type(obj):\n if (\n isinstance(obj, str)\n or isinstance(obj, int)\n or isinstance(obj, float)\n ):\n return True\n return False\n\n\nclass PyListUtil:\n \"\"\"\n Class contains miscellaneous methods to work with and compare python lists\n\n Parameters\n ----------\n path : string\n file path to read/write to\n max_error : float\n maximum acceptable error when doing a compare of floating point numbers\n\n Methods\n -------\n is_iterable : (obj : unknown) : boolean\n determines if obj is iterable\n is_empty_list : (current_list : list) : boolean\n determines if an n-dimensional list is empty\n con_convert : (data : string, data_type : type that has conversion\n operation) : boolean\n returns true if data can be converted into data_type\n max_multi_dim_list_size : (current_list : list) : boolean\n determines the max number of items in a multi-dimensional list\n 'current_list'\n first_item : (current_list : list) : variable\n returns the first item in the list 'current_list'\n next_item : (current_list : list) : variable\n returns the next item in the list 'current_list'\n array_comp : (first_array : list, second_array : list) : boolean\n compares two lists, returns true if they are identical (with max_error)\n spilt_data_line : (line : string) : list\n splits a string apart (using split) and then cleans up the results\n dealing with various MODFLOW input file releated delimiters. 
returns\n the delimiter type used.\n clean_numeric : (text : string) : string\n returns a cleaned up version of 'text' with only numeric characters\n save_array_diff : (first_array : list, second_array : list,\n first_array_name : string, second_array_name : string)\n saves lists 'first_array' and 'second_array' to files first_array_name\n and second_array_name and then saves the difference of the two\n arrays to 'debug_array_diff.txt'\n save_array(filename : string, multi_array : list)\n saves 'multi_array' to the file 'filename'\n \"\"\"\n\n numeric_chars = {\n \"0\": 0,\n \"1\": 0,\n \"2\": 0,\n \"3\": 0,\n \"4\": 0,\n \"5\": 0,\n \"6\": 0,\n \"7\": 0,\n \"8\": 0,\n \"9\": 0,\n \".\": 0,\n \"-\": 0,\n }\n quote_list = {\"'\", '\"'}\n delimiter_list = {\",\": 1}\n delimiter_used = None\n line_num = 0\n consistent_delim = False\n\n def __init__(self, path=None, max_error=0.01):\n self.max_error = max_error\n if path:\n self.path = path\n else:\n self.path = os.getcwd()\n\n @staticmethod\n def has_one_item(current_list):\n if not isinstance(current_list, list) and not isinstance(\n current_list, np.ndarray\n ):\n return True\n if len(current_list) != 1:\n return False\n if (\n isinstance(current_list[0], list)\n or isinstance(current_list, np.ndarray)\n ) and len(current_list[0] != 0):\n return False\n return True\n\n @staticmethod\n def is_iterable(obj):\n try:\n iterator = iter(obj)\n except TypeError:\n return False\n return True\n\n @staticmethod\n def is_empty_list(current_list):\n if not isinstance(current_list, list):\n return not current_list\n\n for item in current_list:\n if isinstance(item, list):\n # still in a list of lists, recurse\n if not PyListUtil.is_empty_list(item):\n return False\n else:\n return False\n\n return True\n\n @staticmethod\n def max_multi_dim_list_size(current_list):\n max_length = -1\n for item in current_list:\n if isinstance(item, str):\n return len(current_list)\n elif len(item) > max_length:\n max_length = len(item)\n return max_length\n\n @staticmethod\n def first_item(current_list):\n if not isinstance(current_list, list) and not isinstance(\n current_list, np.ndarray\n ):\n return current_list\n\n for item in current_list:\n if isinstance(item, list) or isinstance(item, np.ndarray):\n # still in a list of lists, recurse\n return PyListUtil.first_item(item)\n else:\n return item\n\n @staticmethod\n def next_item(\n current_list, new_list=True, nesting_change=0, end_of_list=True\n ):\n # returns the next item in a nested list along with other information:\n # (<next item>, <end of list>, <entering new list>,\n # <change in nesting level>\n if not isinstance(current_list, list) and not isinstance(\n current_list, np.ndarray\n ):\n yield (current_list, end_of_list, new_list, nesting_change)\n else:\n list_size = 1\n for item in current_list:\n if isinstance(item, list) or isinstance(\n current_list, np.ndarray\n ):\n # still in a list of lists, recurse\n for item in PyListUtil.next_item(\n item,\n list_size == 1,\n nesting_change + 1,\n list_size == len(current_list),\n ):\n yield item\n nesting_change = -(nesting_change + 1)\n else:\n yield (\n item,\n list_size == len(current_list),\n list_size == 1,\n nesting_change,\n )\n nesting_change = 0\n list_size += 1\n\n @staticmethod\n def next_list(current_list):\n if not isinstance(current_list[0], list) and not isinstance(\n current_list[0], np.ndarray\n ):\n yield current_list\n else:\n for lst in current_list:\n if isinstance(lst[0], list) or isinstance(lst[0], np.ndarray):\n for lst in 
PyListUtil.next_list(lst):\n yield lst\n else:\n yield lst\n\n def array_comp(self, first_array, second_array):\n diff = first_array - second_array\n max = np.max(np.abs(diff))\n if max > self.max_error:\n return False\n return True\n\n @staticmethod\n def reset_delimiter_used():\n PyListUtil.delimiter_used = None\n PyListUtil.line_num = 0\n PyListUtil.consistent_delim = True\n\n @staticmethod\n def split_data_line(line, external_file=False, delimiter_conf_length=15):\n if (\n PyListUtil.line_num > delimiter_conf_length\n and PyListUtil.consistent_delim\n ):\n # consistent delimiter has been found. continue using that\n # delimiter without doing further checks\n if PyListUtil.delimiter_used is None:\n comment_split = line.split(\"#\", 1)\n clean_line = comment_split[0].strip().split()\n else:\n comment_split = line.split(\"#\", 1)\n clean_line = (\n comment_split[0].strip().split(PyListUtil.delimiter_used)\n )\n if len(comment_split) > 1:\n clean_line.append(\"#\")\n clean_line.append(comment_split[1].strip())\n else:\n # compare against the default split option without comments split\n comment_split = line.split(\"#\", 1)\n clean_line = comment_split[0].strip().split()\n if len(comment_split) > 1:\n clean_line.append(\"#\")\n clean_line.append(comment_split[1].strip())\n # try different delimiters and use the one the breaks the data\n # apart the most\n max_split_size = len(clean_line)\n max_split_type = None\n max_split_list = clean_line\n for delimiter in PyListUtil.delimiter_list:\n comment_split = line.split(\"#\")\n alt_split = comment_split[0].strip().split(delimiter)\n if len(comment_split) > 1:\n alt_split.append(\"#\")\n alt_split.append(comment_split[1].strip())\n alt_split_len = len(alt_split)\n if alt_split_len > max_split_size:\n max_split_size = len(alt_split)\n max_split_type = delimiter\n elif alt_split_len == max_split_size:\n if (\n max_split_type not in PyListUtil.delimiter_list\n or PyListUtil.delimiter_list[delimiter]\n < PyListUtil.delimiter_list[max_split_type]\n ):\n max_split_size = len(alt_split)\n max_split_type = delimiter\n max_split_list = alt_split\n\n if max_split_type is not None:\n clean_line = max_split_list\n if PyListUtil.line_num == 0:\n PyListUtil.delimiter_used = max_split_type\n elif PyListUtil.delimiter_used != max_split_type:\n PyListUtil.consistent_delim = False\n PyListUtil.line_num += 1\n\n arr_fixed_line = []\n index = 0\n # loop through line to fix quotes and delimiters\n len_cl = len(clean_line)\n while index < len_cl:\n item = clean_line[index]\n if item and item not in PyListUtil.delimiter_list:\n if item and item[0] in PyListUtil.quote_list:\n # starts with a quote, handle quoted text\n if item[-1] in PyListUtil.quote_list:\n arr_fixed_line.append(item[1:-1])\n else:\n arr_fixed_line.append(item[1:])\n # loop until trailing quote found\n while index < len_cl:\n index += 1\n if index < len_cl:\n item = clean_line[index]\n if item[-1] in PyListUtil.quote_list:\n arr_fixed_line[-1] = \"{} {}\".format(\n arr_fixed_line[-1], item[:-1]\n )\n break\n else:\n arr_fixed_line[-1] = \"{} {}\".format(\n arr_fixed_line[-1], item\n )\n else:\n # no quote, just append\n arr_fixed_line.append(item)\n index += 1\n\n return arr_fixed_line\n\n @staticmethod\n def clean_numeric(text):\n if isinstance(text, str):\n # remove all non-numeric text from leading and trailing positions\n # of text\n if text:\n while text and (\n text[0] not in PyListUtil.numeric_chars\n or text[-1] not in PyListUtil.numeric_chars\n ):\n if text[0] not in 
PyListUtil.numeric_chars:\n text = text[1:]\n if text and text[-1] not in PyListUtil.numeric_chars:\n text = text[:-1]\n return text\n\n def save_array_diff(\n self, first_array, second_array, first_array_name, second_array_name\n ):\n try:\n diff = first_array - second_array\n self.save_array(first_array_name, first_array)\n self.save_array(second_array_name, second_array)\n self.save_array(\"debug_array_diff.txt\", diff)\n except:\n print(\"An error occurred while outputting array differences.\")\n return False\n return True\n\n # Saves an array with up to three dimensions\n def save_array(self, filename, multi_array):\n file_path = os.path.join(self.path, filename)\n with open(file_path, \"w\") as outfile:\n outfile.write(\"{}\\n\".format(str(multi_array.shape)))\n if len(multi_array.shape) == 4:\n for slice in multi_array:\n for second_slice in slice:\n for third_slice in second_slice:\n for item in third_slice:\n outfile.write(\" {:10.3e}\".format(item))\n outfile.write(\"\\n\")\n outfile.write(\"\\n\")\n outfile.write(\"\\n\")\n elif len(multi_array.shape) == 3:\n for slice in multi_array:\n np.savetxt(outfile, slice, fmt=\"%10.3e\")\n outfile.write(\"\\n\")\n else:\n np.savetxt(outfile, multi_array, fmt=\"%10.3e\")\n\n\nclass MultiList:\n \"\"\"\n Class for storing objects in an n-dimensional list which can be iterated\n through as a single list.\n\n Parameters\n ----------\n mdlist : list\n multi-dimensional list to initialize the multi-list. either mdlist\n or both shape and callback must be specified\n shape : tuple\n shape of the multi-list\n callback : method\n callback method that takes a location in the multi-list (tuple) and\n returns an object to be stored at that location in the multi-list\n\n Methods\n -------\n increment_dimension : (dimension, callback)\n increments the size of one of the two dimensions of the multi-list\n build_list : (callback)\n builds a multi-list of shape self.list_shape, constructing objects\n for the list using the supplied callback method\n first_item : () : object\n gets the first entry in the multi-list\n get_total_size : () : int\n returns the total number of entries in the multi-list\n in_shape : (indexes) : boolean\n returns whether a tuple of indexes are valid indexes for the shape of\n the multi-list\n inc_shape_idx : (indexes) : tuple\n given a tuple of indexes pointing to an entry in the multi-list,\n returns a tuple of indexes pointing to the next entry in the multi-list\n first_index : () : tuple\n returns a tuple of indexes pointing to the first entry in the\n multi-list\n indexes : (start_indexes=None, end_indexes=None) : iter(tuple)\n returns an iterator that iterates from the location in the\n multi-list defined by start_indexes to the location in the\n multi-list defined by end_indexes\n elements : () : iter(object)\n returns an iterator that iterates over each object stored in the\n multi-list\n \"\"\"\n\n def __init__(self, mdlist=None, shape=None, callback=None):\n if mdlist is not None:\n self.multi_dim_list = mdlist\n self.list_shape = MultiList._calc_shape(mdlist)\n elif shape is not None:\n self.list_shape = shape\n self.multi_dim_list = []\n if callback is not None:\n self.build_list(callback)\n else:\n raise Exception(\n \"MultiList requires either a mdlist or a shape \"\n \"at initialization.\"\n )\n\n def __getitem__(self, k):\n if isinstance(k, list) or isinstance(k, tuple):\n item_ptr = self.multi_dim_list\n for index in k:\n item_ptr = item_ptr[index]\n return item_ptr\n else:\n return self.multi_dim_list[k]\n\n 
@staticmethod\n def _calc_shape(current_list):\n shape = []\n if isinstance(current_list, list):\n shape.append(len(current_list))\n sub_list = current_list[0]\n if isinstance(sub_list, list):\n shape += MultiList._calc_shape(sub_list)\n elif isinstance(current_list, np.ndarray):\n shape.append(current_list.shape[0])\n else:\n return 1\n return tuple(shape)\n\n def increment_dimension(self, dimension, callback):\n # ONLY SUPPORTS 1 OR 2 DIMENSIONAL MULTI-LISTS\n # TODO: REWRITE TO SUPPORT N-DIMENSIONAL MULTI-LISTS\n if len(self.list_shape) > 2:\n raise Exception(\n \"Increment_dimension currently only supports 1 \"\n \"or 2 dimensional multi-lists\"\n )\n if len(self.list_shape) == 1:\n self.multi_dim_list.append(callback(len(self.list_shape)))\n self.list_shape = (self.list_shape[0] + 1,)\n else:\n if dimension == 1:\n new_row_idx = len(self.multi_dim_list)\n self.multi_dim_list.append([])\n for index in range(0, self.list_shape[1]):\n self.multi_dim_list[-1].append(\n callback((new_row_idx, index))\n )\n self.list_shape = (self.list_shape[0] + 1, self.list_shape[1])\n elif dimension == 2:\n new_col_idx = len(self.multi_dim_list[0])\n for index in range(0, self.list_shape[0]):\n self.multi_dim_list[index].append(\n callback((index, new_col_idx))\n )\n self.list_shape = (self.list_shape[0], self.list_shape[1] + 1)\n else:\n raise Exception(\n 'For two dimensional lists \"dimension\" must ' \"be 1 or 2.\"\n )\n\n def build_list(self, callback):\n entry_points = [(self.multi_dim_list, self.first_index())]\n shape_len = len(self.list_shape)\n # loop through each dimension\n for index, shape_size in enumerate(self.list_shape):\n new_entry_points = []\n # loop through locations to add to the list\n for entry_point in entry_points:\n # loop through the size of current dimension\n for val in range(0, shape_size):\n if index < (shape_len - 1):\n # this is a multi-dimensional multi-list, build out\n # first dimension\n entry_point[0].append([])\n if entry_point[1] is None:\n new_location = (len(entry_point) - 1,)\n else:\n new_location = ((len(entry_point[0]) - 1), val)\n new_entry_points.append(\n (entry_point[0][-1], new_location)\n )\n else:\n entry_point[0].append(callback(entry_point[1]))\n entry_points = new_entry_points\n\n def first_item(self):\n return PyListUtil.first_item(self.multi_dim_list)\n\n def get_total_size(self):\n shape_size = 1\n for item in self.list_shape:\n if item is None:\n return 0\n else:\n shape_size *= item\n return shape_size\n\n def in_shape(self, indexes):\n for index, item in zip(indexes, self.list_shape):\n if index > item:\n return False\n return True\n\n def inc_shape_idx(self, indexes):\n new_indexes = []\n incremented = False\n for index, item in zip(indexes, self.list_shape):\n if index == item:\n new_indexes.append(0)\n elif incremented:\n new_indexes.append(index)\n else:\n incremented = True\n new_indexes.append(index + 1)\n if not incremented:\n new_indexes[-1] += 1\n return tuple(new_indexes)\n\n def first_index(self):\n first_index = []\n for index in self.list_shape:\n first_index.append(0)\n return tuple(first_index)\n\n def nth_index(self, n):\n index = None\n aii = ArrayIndexIter(self.list_shape, True)\n index_num = 0\n while index_num <= n:\n index = aii.next()\n index_num += 1\n return index\n\n def indexes(self, start_indexes=None, end_indexes=None):\n aii = ArrayIndexIter(self.list_shape, True)\n if start_indexes is not None:\n aii.current_location = list(start_indexes)\n aii.current_index = len(aii.current_location) - 1\n if end_indexes is not 
None:\n aii.end_location = list(end_indexes)\n return aii\n\n def elements(self):\n return MultiListIter(self.multi_dim_list, False)\n\n\nclass ArrayIndexIter:\n def __init__(self, array_shape, index_as_tuple=False):\n self.array_shape = array_shape\n self.current_location = []\n self.end_location = []\n self.first_item = True\n self.index_as_tuple = index_as_tuple\n for item in array_shape:\n self.current_location.append(0)\n self.end_location.append(item)\n self.current_index = len(self.current_location) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.first_item:\n self.first_item = False\n if (\n self.current_location[self.current_index]\n < self.end_location[self.current_index]\n ):\n if len(self.current_location) > 1 or self.index_as_tuple:\n return tuple(self.current_location)\n else:\n return self.current_location[0]\n while self.current_index >= 0:\n location = self.current_location[self.current_index]\n if location < self.end_location[self.current_index] - 1:\n self.current_location[self.current_index] += 1\n self.current_index = len(self.current_location) - 1\n if len(self.current_location) > 1 or self.index_as_tuple:\n return tuple(self.current_location)\n else:\n return self.current_location[0]\n else:\n self.current_location[self.current_index] = 0\n self.current_index -= 1\n raise StopIteration()\n\n next = __next__ # Python 2 support\n\n\nclass MultiListIter:\n def __init__(self, multi_list, detailed_info=False, iter_leaf_lists=False):\n self.multi_list = multi_list\n self.detailed_info = detailed_info\n if iter_leaf_lists:\n self.val_iter = PyListUtil.next_list(self.multi_list)\n else:\n self.val_iter = PyListUtil.next_item(self.multi_list)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n next_val = next(self.val_iter)\n if self.detailed_info:\n return next_val\n else:\n return next_val[0]\n\n next = __next__ # Python 2 support\n\n\nclass ConstIter:\n def __init__(self, value):\n self.value = value\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.value\n\n next = __next__ # Python 2 support\n\n\nclass FileIter:\n def __init__(self, file_path):\n self.eof = False\n try:\n self._fd = open(file_path, \"r\")\n except:\n self.eof = True\n self._current_data = None\n self._data_index = 0\n self._next_line()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.eof:\n raise StopIteration()\n else:\n while self._current_data is not None and self._data_index >= len(\n self._current_data\n ):\n self._next_line()\n self._data_index = 0\n if self.eof:\n raise StopIteration()\n self._data_index += 1\n return self._current_data[self._data_index - 1]\n\n def close(self):\n self._fd.close()\n\n def _next_line(self):\n if self.eof:\n return\n data_line = self._fd.readline()\n if data_line is None:\n self.eof = True\n return\n self._current_data = PyListUtil.split_data_line(data_line)\n\n next = __next__ # Python 2 support\n\n\nclass NameIter:\n def __init__(self, name, first_not_numbered=True):\n self.name = name\n self.iter_num = -1\n self.first_not_numbered = first_not_numbered\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.iter_num += 1\n if self.iter_num == 0 and self.first_not_numbered:\n return self.name\n else:\n return \"{}_{}\".format(self.name, self.iter_num)\n\n next = __next__ # Python 2 support\n\n\nclass PathIter:\n def __init__(self, path, first_not_numbered=True):\n self.path = path\n self.name_iter = NameIter(path[-1], first_not_numbered)\n\n def __iter__(self):\n 
return self\n\n def __next__(self):\n return self.path[0:-1] + (self.name_iter.__next__(),)\n\n next = __next__ # Python 2 support\n" ]
[ [ "numpy.savetxt", "numpy.abs" ] ]
emitra17/PyBNF
[ "61138d8533556eeec262203170053a591fe3633c" ]
[ "pybnf/data.py" ]
[ "\"\"\"Class with methods to manage experimental and simulation data\"\"\"\n\n\nimport logging\nimport math\nimport numpy as np\nimport re\nfrom .printing import PybnfError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Data(object):\n \"\"\"Top level class for managing data\"\"\"\n\n def __init__(self, file_name=None, arr=None, named_arr=None):\n \"\"\"\n Initializes a Data instance. Must specify either a file name or an array\n\n :param file_name:\n :param arr:\n \"\"\"\n self.cols = dict() # dict of column headers to column indices\n self.headers = dict() # dict of column indices to headers\n self._data = None # Numpy array for data\n self._observers = [] # For implementing the observer pattern\n self.weights = None # Numpy array for bootstrapping weights\n self.indvar = None # Name of the independent variable\n self.bind_to(self.update_weights)\n if file_name is not None:\n self.load_data(file_name)\n elif arr is not None:\n self.data = arr\n elif named_arr is not None:\n # Initialize with RoadRunner named array\n # NamedArray is not pickleable, so we need to copy the contents into a regular array.\n self.data = np.array(named_arr)\n self.load_rr_header(named_arr.colnames)\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, data):\n self._data = data\n for callback in self._observers:\n callback(self._data)\n\n def bind_to(self, callback):\n self._observers.append(callback)\n\n def update_weights(self, data):\n self.weights = np.ones(data.shape)\n\n def _valid_indices(self):\n \"\"\"Finds indices in Data.data that are valid for bootstrap sampling\"\"\"\n valid_indices = []\n for i in range(self.data.shape[0]):\n for j in range(1, self.data.shape[1]):\n if re.search('_SD$', self.headers[j]):\n continue\n if np.isfinite(self.data[i, j]):\n valid_indices.append((i, j))\n return valid_indices\n\n def gen_bootstrap_weights(self):\n \"\"\"\n Generates a integer weight for each point in the set of dependent variables. Equivalent\n to sampling with replacement. Weights are used when calculating the objective function\n for bootstrapped data. 
Used for experimental data sets\n\n :return:\n \"\"\"\n indices = np.array(self._valid_indices())\n samples = indices[np.random.choice(indices.shape[0], size=indices.shape[0], replace=True)]\n self.weights = np.zeros(self.data.shape)\n for s in samples:\n self.weights[s[0], s[1]] += 1\n\n def __getitem__(self, col_header):\n \"\"\"\n Gets a column of data based on its column header\n \n :param col_header: Data column name\n :type col_header: str\n :return: Numpy array corresponding to name\n \"\"\"\n idx = self.cols[col_header]\n return self.data[:, idx]\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets a column of data based on its column header\n :param key: Column name to modify\n :type key: str\n :param value: New column contents\n :type value: np.array\n \"\"\"\n idx = self.cols[key]\n self.data[:, idx] = value\n\n def get_row(self, col_header, value):\n \"\"\"\n Returns the (first) data row in which field col_header is equal to value.\n This should typically be used for col_header as the independent variable.\n\n :param col_header: Data column name\n :type col_header str\n :param value:\n :type value: str\n :return: 1D numpy array consisting of the requested row\n \"\"\"\n\n c_idx = self.cols[col_header]\n rows = self.data[self.data[:, c_idx] == value, :]\n\n if rows.size == 0:\n return None\n\n return rows[0, :]\n\n @staticmethod\n def _to_number(x):\n \"\"\"\n Attempts to convert a string to a float\n\n :param x: str\n :return: float\n \"\"\"\n if re.match('\\b[nN][aA][nN]\\b', x):\n return math.nan\n elif re.match('\\b[iI][nN][fF]\\b', x):\n return math.inf\n elif re.match('\\b-[iI][nN][fF]\\b', x):\n return -math.inf\n else:\n return float(x)\n\n def load_data(self, file_name, sep='\\s+'):\n \"\"\"\n Loads column data from a text file\n\n :param file_name: Name of data file\n :type file_name: str\n :param sep: String that separates columns\n :type sep: str\n :return: None\n \"\"\"\n with open(file_name) as f:\n lines = f.readlines()\n\n self.data = self._read_file_lines(lines, sep, file_name=file_name)\n\n def load_rr_header(self, header):\n \"\"\"\n Loads the header from a RoadRunner NamedArray\n :param header: The colnames attribute of a RoadRunner NamedArray (a list of str)\n \"\"\"\n self.indvar = header[0].strip('[]')\n self.cols = {header[i].strip('[]'): i for i in range(len(header))}\n\n def _read_file_lines(self, lines, sep, file_name=''):\n \"\"\"Helper function that reads lines from BNGL gdat files\"\"\"\n\n header = re.split(sep, lines[0].strip().strip('#').strip())\n # Ignore parentheses added to functions in BNG 2.3, and [] added to species names in COPASI\n header = [h.strip('()[]') for h in header]\n if header[0] == 'Time':\n header[0] = 'time' # Allow either capitalization because Copasi uses capital, BNG uses lowercase\n ncols = len(header)\n self.indvar = header[0]\n\n for c in header:\n l = len(self.cols)\n self.cols[c] = l\n self.headers[l] = c\n\n data = []\n for i, l in enumerate(lines[1:]):\n if re.match('^\\s*$', l) or re.match('\\s*#', l):\n continue\n try:\n num_list = [self._to_number(x) for x in re.split(sep, l.strip())]\n except ValueError as err:\n raise PybnfError('Parsing %s on line %i: %s' % (file_name, i+2, err.args[0]))\n if len(num_list) != ncols:\n raise PybnfError('Parsing %s on line %i: Found %i values, expected %i' %\n (file_name, i+2, len(num_list), ncols))\n data.append(num_list)\n\n return np.array(data)\n\n def _dep_cols(self, idx):\n \"\"\"\n Returns all data columns without independent variable\n\n :param idx: Column index for 
independent variable (defaults to 0)\n :type idx: int\n :return: Numpy array of observable data\n \"\"\"\n return np.delete(self.data, idx, axis=1)\n\n def _ind_col(self, idx):\n \"\"\"\n Returns data column corresponding to independent variable\n\n :param idx: Column index for independent variable (defaults to 0)\n :type idx: int\n :return: 1-D Numpy array of independent variable values\n \"\"\"\n return self.data[:, idx]\n\n def normalize_to_peak(self, idx=0, cols='all'):\n \"\"\"\n Normalizes all data columns (except the independent variable) to the peak\n value in their respective columns\n\n Updates the data array in this object, returns none.\n\n :param idx: Index of independent variable\n :type idx: int\n :param cols: List of column indices to normalize, or 'all' for all columns but independent variable\n :return: Normalized Numpy array (including independent variable column)\n \"\"\"\n with np.errstate(all='ignore'): # Suppress divide by 0 warnings printed to terminal\n if cols == 'all':\n cols = list(range(self.data.shape[1]))\n cols.remove(idx)\n for c in cols:\n self.data[:, c] = self.data[:, c] / np.max(self.data[:, c])\n\n def normalize_to_init(self, idx=0, cols='all'):\n \"\"\"\n Normalizes all data columns (except the independent variable) to the initial\n value in their respective columns\n\n Updates the data array in this object, returns none.\n\n :param idx: Index of independent variable\n :type idx: int\n :param cols: List of column indices to normalize, or 'all' for all columns but independent variable\n \"\"\"\n with np.errstate(all='ignore'): # Suppress divide by 0 warnings printed to terminal\n if cols == 'all':\n cols = list(range(self.data.shape[1]))\n cols.remove(idx)\n for c in cols:\n self.data[:, c] = self.data[:, c] / self.data[0, c]\n\n def normalize_to_zero(self, idx=0, bc=True, cols='all'):\n \"\"\"\n Normalizes data so that each column's mean is 0\n\n Updates the data array in this object, returns none.\n\n :param idx: Index of independent variable\n :type idx: int\n :param bc: If True, the standard deviation is normalized by 1/(N-1). If False, by 1/N.\n :type bc: bool\n :param cols: List of column indices to normalize, or 'all' for all columns but independent variable\n \"\"\"\n with np.errstate(all='ignore'): # Suppress divide by 0 warnings printed to terminal\n if cols == 'all':\n cols = list(range(self.data.shape[1]))\n cols.remove(idx)\n ddof = 0 if not bc else 1\n for c in cols:\n col = self.data[:, c]\n col -= np.mean(col)\n std = np.std(col, ddof=ddof)\n if std != 0:\n col /= std\n self.data[:, c] = col\n\n def _subtract_baseline(self, idx=0, cols='all'):\n if cols == 'all':\n cols = list(range(self.data.shape[1]))\n cols.remove(idx)\n for c in cols:\n col = self.data[:, c]\n self.data[:, c] = col - self.data[0, c]\n\n def normalize_to_unit_scale(self, idx=0, cols='all'):\n \"\"\"\n Scales data so that the range of values is between (min-init)/(max-init) and 1. If the maximum value is 0\n (i.e. 
max == init), then the data is scaled by the minimum value after subtracting the initial value\n so that the range of values is between 0 and -1\n\n :param idx: Index of independent variable\n :type idx: int\n :param cols: List of column indices to normalize, or 'all' for all columns but independent variable\n :type: list or str\n :return:\n \"\"\"\n if cols == 'all':\n cols = list(range(self.data.shape[1]))\n cols.remove(idx)\n self._subtract_baseline(idx, cols)\n for c in cols:\n cmax = np.max(self.data[:, c])\n if cmax == 0.0:\n self.data[:, c] = self.data[:, c] / np.abs(np.min(self.data[:, c]))\n else:\n self.data[:, c] = self.data[:, c] / np.max(self.data[:, c])\n\n @staticmethod\n def average(datas):\n \"\"\"\n Calculates the average of several data objects.\n The input Data objects should have the same column labels and independent variable values (NOT CURRENTLY\n CHECKED)\n\n :param datas: Iterable of Data objects of identical size to be averaged\n :return: Data object\n \"\"\"\n output = Data()\n output.cols = datas[0].cols\n output.data = np.mean(np.stack([d.data for d in datas]), axis=0)\n return output\n\n def normalize(self, method):\n \"\"\"\n Normalize the data according to the specified method: 'init', 'peak', 'unit', or 'zero'\n The method could also be a list of ordered pairs [('init', [columns]), ('peak', [columns])], where columns\n is a list of integers or column labels\n\n Updates the data array in this object, returns none.\n \"\"\"\n\n def normalize_once(m, cols):\n if m == 'init':\n self.normalize_to_init(cols=cols)\n elif m == 'peak':\n self.normalize_to_peak(cols=cols)\n elif m == 'zero':\n self.normalize_to_zero(cols=cols)\n elif m == 'unit':\n self.normalize_to_unit_scale(cols=cols)\n else:\n # Should have caught a user-defined invalid setting in config before getting here.\n raise ValueError('Invalid method %s for Data.normalize()' % m)\n\n if type(method) == str:\n normalize_once(method, 'all')\n else:\n for mi, cols_i in method:\n if type(cols_i[0]) == str:\n # Convert to int indices\n cols_i = [self.cols[c] for c in cols_i]\n normalize_once(mi, cols_i)\n\n def weights_to_file(self, file_name):\n logger.info(\"Saving weights in file %s\" % file_name)\n np.savetxt(file_name, self.weights, fmt='%d', header='\\t'.join(sorted(self.cols, key=self.cols.get)))\n" ]
[ [ "numpy.isfinite", "numpy.random.choice", "numpy.min", "numpy.stack", "numpy.ones", "numpy.max", "numpy.delete", "numpy.std", "numpy.mean", "numpy.errstate", "numpy.array", "numpy.zeros" ] ]
fabiommendes/easymunk
[ "420dfc4a006997c47887f6876876249674feb3cd" ]
[ "examples/matplotlib_util_demo.py" ]
[ "import matplotlib.pyplot as plt\n\nimport easymunk.matplotlib\nfrom shapes_for_draw_demos import fill_space\n\nspace = easymunk.Space()\ncaptions = fill_space(space, (1, 1, 0, 1))\n\nfig = plt.figure(figsize=(14, 10))\nax = plt.axes(xlim=(0, 1000), ylim=(0, 700))\nax.set_aspect(\"equal\")\nfor caption in captions:\n x, y = caption[0]\n y = y - 15\n ax.text(x, y, caption[1], fontsize=12)\no = easymunk.matplotlib.DrawOptions(ax)\nspace.debug_draw(o)\nfig.savefig(\"matplotlib_util_demo.png\", bbox_inches=\"tight\")\n" ]
[ [ "matplotlib.pyplot.axes", "matplotlib.pyplot.figure" ] ]
astrojuanlu/ropy
[ "e01948cf7bfbbf8bca16df921b453e19a6a8cc04" ]
[ "tests/transform/test_utils3d.py" ]
[ "import pytest\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as ScipyRotation\n\nimport ropy.transform as rtf\n\n\[email protected](\n \"sequence, angles, degrees\",\n [\n (\"xyz\", (0, 0, np.pi / 4), False),\n (\"xyz\", (0, 0, 45), True),\n (\"xyz\", (0, 0, 90), True),\n (\"XZY\", (45, 180, 90), True),\n (\"XYZ\", (45, 180, 90), False),\n ],\n)\ndef test_EulerRotation(sequence, angles, degrees):\n in_vectors = np.eye(3)\n\n angles = np.asarray(angles)\n\n rot = rtf.EulerRotation(sequence, angles, degrees=degrees)\n scipy_rot = ScipyRotation.from_euler(sequence, angles, degrees)\n\n result = rot.transform(in_vectors)\n expected = scipy_rot.apply(in_vectors)\n\n assert np.allclose(result, expected)\n\n\[email protected](\n \"sequence, quaternion\",\n [\n (\"xyzw\", (0, 1, 0, np.pi)),\n (\"xyzw\", (1, 0, 0, np.pi / 2)),\n (\"xyzw\", (1, 1, 0, np.pi)),\n (\"wxyz\", (np.pi, 0, 1, 0)),\n (\"wxyz\", (np.pi / 2, 1, 1, 1)),\n ],\n)\ndef test_QuaternionRotation(sequence, quaternion):\n in_vectors = np.eye(3)\n\n rot = rtf.QuaternionRotation(quaternion, sequence=sequence)\n\n if sequence == \"xyzw\":\n scipy_rot = ScipyRotation.from_quat(quaternion)\n else:\n quaternion = (*quaternion[1:], quaternion[0])\n scipy_rot = ScipyRotation.from_quat(quaternion)\n\n result = rot.transform(in_vectors)\n expected = scipy_rot.apply(in_vectors)\n\n assert np.allclose(result, expected)\n\n\ndef test_QuaternionRotation_invalid_sequence():\n with pytest.raises(ValueError):\n rtf.QuaternionRotation((0, 0, 0, 1), sequence=\"xwyz\")\n\n\[email protected](\n \"point_in, fov, im_shape, point_out\",\n [\n ((0, 0, 1), np.pi / 3, (240, 320), (120, 160)),\n ((0, 0, 3.5), np.pi / 2, (240, 320), (120, 160)),\n ((0, 0, 1), 1.5 * np.pi, (480, 640), (240, 320)),\n ((480 / 640, 1, 1), np.pi / 2, (480, 640), (480, 640)),\n ((-480 / 640, -1, 1), np.pi / 2, (480, 640), (0, 0)),\n ],\n)\ndef test_perspective_transform(point_in, fov, im_shape, point_out):\n proj = rtf.FrustumProjection(fov, im_shape)\n assert np.allclose(proj.transform(point_in), point_out)\n" ]
[ [ "numpy.allclose", "scipy.spatial.transform.Rotation.from_quat", "numpy.asarray", "numpy.eye", "scipy.spatial.transform.Rotation.from_euler" ] ]
swc-17/Cylinder3D
[ "fffc642b3950a8e80d553aa6de07f0652ce96068" ]
[ "network/segmentator_3d_asymm_spconv.py" ]
[ "# -*- coding:utf-8 -*-\n# author: Xinge\n# @file: segmentator_3d_asymm_spconv.py\n\nimport numpy as np\nimport spconv\nimport torch\nfrom torch import nn\n\n\ndef conv3x3(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n\n\ndef conv1x3(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=stride,\n padding=(0, 1, 1), bias=False, indice_key=indice_key)\n\n\ndef conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3), stride=stride,\n padding=(0, 0, 1), bias=False, indice_key=indice_key)\n\n\ndef conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1), stride=stride,\n padding=(0, 1, 0), bias=False, indice_key=indice_key)\n\n\ndef conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1), stride=stride,\n padding=(1, 0, 0), bias=False, indice_key=indice_key)\n\n\ndef conv3x1(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3), stride=stride,\n padding=(1, 0, 1), bias=False, indice_key=indice_key)\n\n\ndef conv1x1(in_planes, out_planes, stride=1, indice_key=None):\n return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n\n\nclass ResContextBlock(nn.Module):\n def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):\n super(ResContextBlock, self).__init__()\n self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.bn0 = nn.BatchNorm1d(out_filters)\n self.act1 = nn.LeakyReLU()\n\n self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key + \"bef\")\n self.bn0_2 = nn.BatchNorm1d(out_filters)\n self.act1_2 = nn.LeakyReLU()\n\n self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm1d(out_filters)\n\n self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key + \"bef\")\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm1d(out_filters)\n\n self.weight_initialization()\n\n def weight_initialization(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut.features = self.act1(shortcut.features)\n shortcut.features = self.bn0(shortcut.features)\n\n shortcut = self.conv1_2(shortcut)\n shortcut.features = self.act1_2(shortcut.features)\n shortcut.features = self.bn0_2(shortcut.features)\n\n resA = self.conv2(x)\n resA.features = self.act2(resA.features)\n resA.features = self.bn1(resA.features)\n\n resA = self.conv3(resA)\n resA.features = self.act3(resA.features)\n resA.features = self.bn2(resA.features)\n resA.features = resA.features + shortcut.features\n\n return resA\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,\n pooling=True, drop_out=True, height_pooling=False, indice_key=None):\n super(ResBlock, self).__init__()\n self.pooling = pooling\n self.drop_out = drop_out\n\n self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key + 
\"bef\")\n self.act1 = nn.LeakyReLU()\n self.bn0 = nn.BatchNorm1d(out_filters)\n\n self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key + \"bef\")\n self.act1_2 = nn.LeakyReLU()\n self.bn0_2 = nn.BatchNorm1d(out_filters)\n\n self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm1d(out_filters)\n\n self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key + \"bef\")\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm1d(out_filters)\n\n if pooling:\n if height_pooling:\n self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,\n padding=1, indice_key=indice_key, bias=False)\n else:\n self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2, 2, 1),\n padding=1, indice_key=indice_key, bias=False)\n self.weight_initialization()\n\n def weight_initialization(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut.features = self.act1(shortcut.features)\n shortcut.features = self.bn0(shortcut.features)\n\n shortcut = self.conv1_2(shortcut)\n shortcut.features = self.act1_2(shortcut.features)\n shortcut.features = self.bn0_2(shortcut.features)\n\n resA = self.conv2(x)\n resA.features = self.act2(resA.features)\n resA.features = self.bn1(resA.features)\n\n resA = self.conv3(resA)\n resA.features = self.act3(resA.features)\n resA.features = self.bn2(resA.features)\n\n resA.features = resA.features + shortcut.features\n\n if self.pooling:\n resB = self.pool(resA)\n return resB, resA\n else:\n return resA\n\n\nclass UpBlock(nn.Module):\n def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):\n super(UpBlock, self).__init__()\n # self.drop_out = drop_out\n self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key + \"new_up\")\n self.trans_act = nn.LeakyReLU()\n self.trans_bn = nn.BatchNorm1d(out_filters)\n\n self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)\n self.act1 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm1d(out_filters)\n\n self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)\n self.act2 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm1d(out_filters)\n\n self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)\n self.act3 = nn.LeakyReLU()\n self.bn3 = nn.BatchNorm1d(out_filters)\n # self.dropout3 = nn.Dropout3d(p=dropout_rate)\n\n self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key,\n bias=False)\n\n self.weight_initialization()\n\n def weight_initialization(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x, skip):\n upA = self.trans_dilao(x)\n upA.features = self.trans_act(upA.features)\n upA.features = self.trans_bn(upA.features)\n\n ## upsample\n upA = self.up_subm(upA)\n\n upA.features = upA.features + skip.features\n\n upE = self.conv1(upA)\n upE.features = self.act1(upE.features)\n upE.features = self.bn1(upE.features)\n\n upE = self.conv2(upE)\n upE.features = self.act2(upE.features)\n upE.features = self.bn2(upE.features)\n\n upE = self.conv3(upE)\n upE.features = self.act3(upE.features)\n upE.features = self.bn3(upE.features)\n\n return upE\n\n\nclass ReconBlock(nn.Module):\n def __init__(self, in_filters, out_filters, 
kernel_size=(3, 3, 3), stride=1, indice_key=None):\n super(ReconBlock, self).__init__()\n self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.bn0 = nn.BatchNorm1d(out_filters)\n self.act1 = nn.Sigmoid()\n\n self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.bn0_2 = nn.BatchNorm1d(out_filters)\n self.act1_2 = nn.Sigmoid()\n\n self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key + \"bef\")\n self.bn0_3 = nn.BatchNorm1d(out_filters)\n self.act1_3 = nn.Sigmoid()\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut.features = self.bn0(shortcut.features)\n shortcut.features = self.act1(shortcut.features)\n\n shortcut2 = self.conv1_2(x)\n shortcut2.features = self.bn0_2(shortcut2.features)\n shortcut2.features = self.act1_2(shortcut2.features)\n\n shortcut3 = self.conv1_3(x)\n shortcut3.features = self.bn0_3(shortcut3.features)\n shortcut3.features = self.act1_3(shortcut3.features)\n shortcut.features = shortcut.features + shortcut2.features + shortcut3.features\n\n shortcut.features = shortcut.features * x.features\n\n return shortcut\n\n\nclass Asymm_3d_spconv(nn.Module):\n def __init__(self,\n output_shape,\n use_norm=True,\n num_input_features=128,\n nclasses=20, n_height=32, strict=False, init_size=16):\n super(Asymm_3d_spconv, self).__init__()\n self.nclasses = nclasses\n self.nheight = n_height\n self.strict = False\n\n sparse_shape = np.array(output_shape)\n # sparse_shape[0] = 11\n print(sparse_shape)\n self.sparse_shape = sparse_shape\n\n self.downCntx = ResContextBlock(num_input_features, init_size, indice_key=\"pre\")\n self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key=\"down2\")\n self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key=\"down3\")\n self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False,\n indice_key=\"down4\")\n self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False,\n indice_key=\"down5\")\n\n self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key=\"up0\", up_key=\"down5\")\n self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key=\"up1\", up_key=\"down4\")\n self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key=\"up2\", up_key=\"down3\")\n self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key=\"up3\", up_key=\"down2\")\n\n self.ReconNet = ReconBlock(2 * init_size, 2 * init_size, indice_key=\"recon\")\n\n self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key=\"logit\", kernel_size=3, stride=1, padding=1,\n bias=True)\n\n def forward(self, voxel_features, coors, batch_size):\n # x = x.contiguous()\n coors = coors.int()\n # import pdb\n # pdb.set_trace()\n ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n ret = self.downCntx(ret)\n down1c, down1b = self.resBlock2(ret)\n down2c, down2b = self.resBlock3(down1c)\n down3c, down3b = self.resBlock4(down2c)\n down4c, down4b = self.resBlock5(down3c)\n\n up4e = self.upBlock0(down4c, down4b)\n up3e = self.upBlock1(up4e, down3b)\n up2e = self.upBlock2(up3e, down2b)\n up1e = self.upBlock3(up2e, down1b)\n\n up0e = self.ReconNet(up1e)\n\n up0e.features = torch.cat((up0e.features, up1e.features), 1)\n\n logits = self.logits(up0e)\n y = logits.dense()\n return y\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.cat", "torch.nn.init.constant_", "torch.nn.Sigmoid", "torch.nn.LeakyReLU", "numpy.array" ] ]
mushrifshahreyar/NoC-Routing
[ "c744968697d10bf096368d117c4de71d5a62643c" ]
[ "DQN_Hops_rewards.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom collections import deque\nimport time\nimport random\nimport os\nimport sys\nimport pickle\nfrom os import path\n\nREPLAYMEM = \"replay.dat\"\nVARIABLES = \"variable.dat\"\nNACTIONS = 4\nGRIDSIZE = 8\nNROUTERS = GRIDSIZE * GRIDSIZE\nNSTATES = 2 * NROUTERS # Number of states in case of one-hot encoding\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nLEARNINGRATE = 0.01\nDISCOUNT = 0.9\nEPSILON = 0.99\nEPSILON_DECAY = 0.9992\nMIN_EPSILON = 0.01\nREWARD_FACTOR = 1000000\nDISTANCE_FACTOR = 500\n\n\nREPLAY_MEMORY_SIZE = 2000 # How many last steps to keep for model training\nMIN_REPLAY_MEMORY_SIZE = 64 # Minimum number of steps in a memory to start training\nMINIBATCH_SIZE = 32 # How many steps (samples) to use for training\nUPDATE_TARGET_EVERY = 64 # Terminal states (end of episodes)\n\n# Try using ordinal encoding as there is relationship between inputs\ndef oneHotEncode(my_id, dest_id):\n result = []\n my_id_vec = [0] * NROUTERS\n dest_id_vec = [0] * NROUTERS\n my_id_vec[my_id] = 1\n dest_id_vec[dest_id] = 1\n result.extend(my_id_vec)\n result.extend(dest_id_vec)\n return np.array(result)\n\n\ndef vectorize(my_id, dest_id, free_vcs, future_dist, past_dist):\n result = [my_id, dest_id]\n result.extend(free_vcs)\n result.append(future_dist)\n result.append(past_dist)\n return np.array(result)\n\ndef getFutureDistance(my_id, dest_id):\n my_x = my_id % GRIDSIZE\n my_y = my_id // GRIDSIZE\n dest_x = dest_id % GRIDSIZE\n dest_y = dest_id // GRIDSIZE\n dist = abs(my_x - dest_x) + abs(my_y - dest_y)\n return dist\n\n\ndef initialize():\n # Main model\n model = create_model()\n\n # Target network\n target_model = create_model()\n target_model.set_weights(model.get_weights())\n\n # An array with last n steps for training\n replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)\n\n # Used to count when to update target network with main network's weights\n target_update_counter = 0\n\n return model, target_model, replay_memory, target_update_counter\n\ndef create_model():\n model = Sequential()\n\n #model.add(tf.keras.Input(shape = (NSTATES, )))\n model.add(tf.keras.Input(shape = (128, )))\n model.add(Dense(256, activation = 'relu'))\n # model.add(Dropout(0.2))\n\n model.add(Dense(128, activation = 'relu'))\n # model.add(Dropout(0.2))\n model.add(Dense(64, activation = 'relu'))\n # model.add(Dropout(0.2))\n model.add(Dense(NACTIONS, activation='relu'))\n\n model.compile(loss=\"mse\", optimizer=Adam(lr = LEARNINGRATE), metrics=['accuracy'])\n return model\n\n\n# Adds step's data to a memory replay array\n# (observation space, action, reward, new observation space, done)\ndef update_replay_memory(replay_memory, my_id, dest_id, prev_router_id, prev_action, queueing_delay, done, is_dead, free_vcs, past_dist):\n\n transition = [my_id, dest_id, prev_router_id, prev_action, queueing_delay, done, is_dead, free_vcs, past_dist]\n replay_memory.append(transition)\n\n return replay_memory\n\n\n# Trains main network every step during episode\ndef train(model, target_model, replay_memory, target_update_counter):\n\n # Start training only if certain number of samples is already saved\n if len(replay_memory) < MIN_REPLAY_MEMORY_SIZE:\n return model, target_model, replay_memory, target_update_counter\n\n # Get a minibatch of random 
samples from memory replay table\n minibatch = random.sample(replay_memory, MINIBATCH_SIZE)\n\n # Get current states from minibatch, then query NN model for Q values\n #current_states = np.array([oneHotEncode(transition[2], transition[1]) for transition in minibatch])\n current_states = np.array([oneHotEncode(transition[2], transition[1]) for transition in minibatch])\n current_qs_list = model.predict(current_states)\n #print('Transitino[7]: ', transition[7])\n #current_dead_states = np.array([oneHotEncode(transition[0], transition[1]) for transition in minibatch])\n current_dead_states = np.array([oneHotEncode(transition[0], transition[1]) for transition in minibatch])\n current_qs_dead_list = model.predict(current_dead_states) \n\n # Get future states from minibatch, then query NN model for Q values\n # When using target network, query it, otherwise main network should be queried\n #new_current_states = np.array([oneHotEncode(transition[0], transition[1]) for transition in minibatch])\n new_current_states = np.array([oneHotEncode(transition[0], transition[1]) for transition in minibatch])\n future_qs_list = target_model.predict(new_current_states)\n\n X = []\n y = []\n\n # Now we need to enumerate our batches\n # reward here is queuing delay\n for index, (my_id, dest_id, prev_router_id, prev_action, reward, done, is_dead, free_vcs, past_dist) in enumerate(minibatch):\n\n if not is_dead:\n if not done:\n max_future_q = np.max(future_qs_list[index])\n new_q = (REWARD_FACTOR / (reward + 1)) + (DISTANCE_FACTOR / (getFutureDistance(prev_router_id, dest_id) + 1)) + (DISTANCE_FACTOR / (past_dist + 1)) + (DISCOUNT * max_future_q)\n else:\n new_q = (REWARD_FACTOR / reward) + 5000 \n\n\n # Update Q value for given state\n current_qs = current_qs_list[index]\n current_qs[prev_action] = new_q\n\n # And append to our training data\n #X.append(oneHotEncode(prev_router_id, dest_id))\n X.append(oneHotEncode(prev_router_id, dest_id))\n y.append(current_qs)\n else:\n new_q = -1000\n\n current_qs = current_qs_dead_list[index]\n current_qs[prev_action] = new_q\n\n # And append to our training data\n #X.append(oneHotEncode(my_id, dest_id))\n X.append(oneHotEncode(my_id, dest_id))\n y.append(current_qs)\n\n\n # Fit on all samples as one batch, log only on terminal state\n model.fit(np.array(X), np.array(y), batch_size = MINIBATCH_SIZE, verbose=0)\n\n if done:\n target_update_counter += 1\n\n # If counter reaches set value, update target network with weights of main network\n if target_update_counter > UPDATE_TARGET_EVERY:\n target_model.set_weights(model.get_weights())\n target_update_counter = 0\n \n return model, target_model, replay_memory, target_update_counter\n\n\n\n# Queries main network for Q values given current observation space (environment state)\ndef get_qs(model, my_id, dest_id, free_vcs, past_dist):\n\n #state = oneHotEncode(my_id, dest_id)\n #print('Size:', len(free_vcs))\n state = oneHotEncode(my_id, dest_id)\n #actions = model.predict(np.array(state).reshape(-1, NSTATES))\n actions = model.predict(np.array(state).reshape(-1, 128))\n optimal_action = np.argmax(actions)\n \n return model, optimal_action\n\n\nprint(\"\\nSTARTING DQN PYTHON EXECUTION!\")\n\n\nif __name__ == \"__main__\":\n\n reset_variables = sys.argv[1] if len(sys.argv) > 1 else 0\n iterations = sys.argv[2] if len(sys.argv) > 2 else 0\n\n m = None\n tm = None\n rm = None\n tuc = None\n\n if reset_variables:\n m, tm, rm, tuc = initialize()\n else:\n m = tf.keras.models.load_model('./saved_model')\n tm = 
tf.keras.models.load_model('./saved_target_model')\n with open('REPLAYMEM', 'rb') as f:\n rm = pickle.load(f)\n with open('VARIABLES', 'rb') as f:\n tuc = pickle.load(f)\n\n while(1):\n while(1):\n if(path.exists(\"input.txt\")):\n break\n\n my_id = -1\n dest_id = 0\n prev_router_id = 0\n prev_action = 0\n queueing_delay = 0\n free_vcs = []\n past_dist = 0\n\n while(True):\n with open('input.txt','r') as f:\n lines = f.readlines()\n if(len(lines) == 12):\n break\n\n my_id = int(lines[0])\n src_id = int(lines[1])\n dest_id = int(lines[2])\n prev_router_id = int(lines[3])\n prev_action = int(lines[4])\n queueing_delay = int(lines[5])\n cur_tick = int(lines[6])\n free_vcs.append(int(lines[7]))\n free_vcs.append(int(lines[8])) \n free_vcs.append(int(lines[9])) \n free_vcs.append(int(lines[10])) \n past_dist = int(lines[11])\n\n os.remove(\"input.txt\")\n # print('Free vcss:', free_vcs)\n iterations += 1\n print('Iteration: ', iterations, ' Epsilon:', EPSILON)\n\n done = 0\n if(my_id == dest_id):\n done = 1\n\n my_x = my_id % GRIDSIZE\n my_y = my_id // GRIDSIZE\n\n is_dead = False\n generate_model_action = np.random.random() > EPSILON\n if generate_model_action:\n m, action = get_qs(m, my_id, dest_id, free_vcs, past_dist)\n print(\"Action taken: \",action)\n if(action == 0 and my_y == GRIDSIZE-1):\n is_dead = True\n elif(action == 1 and my_x == GRIDSIZE-1):\n is_dead = True\n elif(action == 2 and my_y == 0):\n is_dead = True\n elif(action == 3 and my_x == 0):\n is_dead = True\n \n rm = update_replay_memory(rm, my_id, dest_id, prev_router_id, action, queueing_delay, done, is_dead, free_vcs, past_dist)\n\n \n if is_dead or not generate_model_action:\n is_dead = False\n action = np.random.randint(0, NACTIONS)\n while True:\n if(action == 0 and my_y < GRIDSIZE-1):\n break\n elif(action == 1 and my_x < GRIDSIZE-1):\n break\n elif(action == 2 and my_y>0):\n break\n elif(action == 3 and my_x>0):\n break\n elif(my_id == dest_id):\n break\n else:\n action = np.random.randint(0, NACTIONS)\n\n f = open(\"action.txt\",\"w\")\n f.write(str(action))\n f.close()\n\n\n if my_id != src_id:\n rm = update_replay_memory(rm, my_id, dest_id, prev_router_id, prev_action, queueing_delay, done, is_dead, free_vcs, past_dist)\n\n if my_id == dest_id:\n m, tm, rm, tuc = train(m, tm, rm, tuc)\n\n if iterations % 50 == 0:\n if EPSILON > MIN_EPSILON:\n EPSILON *= EPSILON_DECAY\n EPSILON = max(MIN_EPSILON, EPSILON)\n if iterations == 200000:\n EPSILON = MIN_EPSILON\n\n\n if cur_tick == 100000:\n m.save('./saved_model')\n tm.save('./saved_target_model')\n with open('REPLAYMEM', 'wb') as f:\n pickle.dump(rm, f)\n with open('VARIABLES', 'wb') as f:\n pickle.dump(tuc, f)\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.random.random", "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "numpy.max", "numpy.argmax", "tensorflow.keras.optimizers.Adam", "numpy.array", "tensorflow.keras.models.Sequential", "numpy.random.randint" ] ]
cdknorow/modelstore
[ "f08839478432b89e828a8dcb41adf27b0e3aa66b" ]
[ "tests/models/test_pytorch_lightning.py" ]
[ "# Copyright 2020 Neal Lathia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport pytest\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom modelstore.models.pytorch_lightning import (\n MODEL_CHECKPOINT,\n PyTorchLightningManager,\n _save_lightning_model,\n)\nfrom torch import nn\nfrom torch.utils.data import DataLoader, TensorDataset\n\n# pylint: disable=protected-access\n# pylint: disable=redefined-outer-name\n# pylint: disable=missing-class-docstring\n\n\nclass ExampleLightningNet(pl.LightningModule):\n def __init__(self):\n super(ExampleLightningNet, self).__init__()\n self.linear = nn.Linear(10, 1)\n\n def forward(self, x):\n return self.linear(x)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.linear(x)\n train_loss = F.mse_loss(y_hat, y)\n return train_loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.linear(x)\n val_loss = F.mse_loss(y_hat, y)\n return val_loss\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters())\n return [optimizer], []\n\n\[email protected]\ndef lightning_model():\n return ExampleLightningNet()\n\n\[email protected]\ndef train_loader():\n x = torch.rand(20, 10)\n y = torch.rand(20).view(-1, 1)\n data_set = TensorDataset(x, y)\n return DataLoader(data_set, num_workers=0)\n\n\[email protected]\ndef val_loader():\n x = torch.rand(2, 10)\n y = torch.rand(2).view(-1, 1)\n data_set = TensorDataset(x, y)\n return DataLoader(data_set, num_workers=0)\n\n\[email protected]\ndef lightning_trainer(tmp_path, lightning_model, train_loader, val_loader):\n trainer = pl.Trainer(max_epochs=5, default_root_dir=tmp_path)\n trainer.fit(lightning_model, train_loader, val_loader)\n return trainer\n\n\[email protected]\ndef lightning_manager():\n return PyTorchLightningManager()\n\n\ndef assert_models_equal(\n model_a: pl.LightningModule, module_b: pl.LightningModule\n):\n for a_params, lb_params in zip(model_a.parameters(), module_b.parameters()):\n assert a_params.data.ne(lb_params.data).sum() == 0\n\n\ndef test_model_info(lightning_manager, lightning_model):\n exp = {\"library\": \"pytorch_lightning\", \"type\": \"ExampleLightningNet\"}\n res = lightning_manager._model_info(model=lightning_model)\n assert exp == res\n\n\[email protected](\n \"ml_library,should_match\",\n [\n (\"pytorch_lightning\", True),\n (\"sklearn\", False),\n ],\n)\ndef test_is_same_library(lightning_manager, ml_library, should_match):\n assert (\n lightning_manager._is_same_library({\"library\": ml_library})\n == should_match\n )\n\n\ndef test_model_data(lightning_manager, lightning_model):\n exp = {}\n res = lightning_manager._model_data(model=lightning_model)\n assert exp == res\n\n\ndef test_required_kwargs(lightning_manager):\n assert lightning_manager._required_kwargs() == [\"trainer\", \"model\"]\n\n\ndef test_matches_with(lightning_manager, lightning_trainer, lightning_model):\n assert lightning_manager.matches_with(\n trainer=lightning_trainer, 
model=lightning_model\n )\n assert not lightning_manager.matches_with(model=\"a-string-value\")\n assert not lightning_manager.matches_with(classifier=lightning_trainer)\n\n\ndef test_get_functions(lightning_manager, lightning_model, lightning_trainer):\n assert (\n len(\n lightning_manager._get_functions(\n trainer=lightning_trainer, model=lightning_model\n )\n )\n == 1\n )\n\n\ndef test_get_params(lightning_manager):\n exp = {}\n res = lightning_manager._get_params()\n assert exp == res\n\n\ndef test_save_model(tmp_path, lightning_model, lightning_trainer):\n exp = os.path.join(tmp_path, \"checkpoint.pt\")\n file_path = _save_lightning_model(tmp_path, lightning_trainer)\n assert exp == file_path\n\n loaded_model = ExampleLightningNet.load_from_checkpoint(file_path)\n assert_models_equal(lightning_model, loaded_model)\n\n\ndef test_load_model(tmp_path, lightning_manager, lightning_trainer):\n # Save the model to a tmp directory\n file_path = os.path.join(tmp_path, MODEL_CHECKPOINT)\n lightning_trainer.save_checkpoint(file_path)\n\n #  Load the model\n loaded_model = lightning_manager.load(\n tmp_path,\n {\n \"model\": {\n \"model_type\": {\n \"type\": \"ExampleLightningNet\",\n }\n }\n },\n )\n\n # Expect the two to be the same\n assert_models_equal(lightning_trainer.model, loaded_model)\n" ]
[ [ "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.nn.functional.mse_loss", "torch.nn.Linear", "torch.rand" ] ]
ShivangMathur1/Small-Pytorch-Projects
[ "aebc6e81103fe2a6830caeedc1b17227e211a6e5" ]
[ "MNIST/train.py" ]
[ "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms, datasets\n\n# Neural Network\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n\n #layers: \n self.fc1 = nn.Linear(28*28, 64)\n self.fc2 = nn.Linear(64, 64)\n self.fc3 = nn.Linear(64, 64)\n self.fc4 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == '__main__':\n\n # Load dataset to train on\n train = datasets.MNIST('', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))\n trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)\n\n # Hyperparameters\n epochs = 3\n lr = 0.001\n\n # Declaring model, optimizer\n net = Net()\n optimizer = optim.Adam(net.parameters(), lr=lr)\n\n # Learning\n for epoch in range(epochs):\n for data in trainset:\n # Data is a batch of (feature, label)\n x, y = data\n net.zero_grad()\n \n # Forward, backward, optimiztion\n output = net(x.view(-1, 28*28))\n loss = F.nll_loss(output, y)\n loss.backward()\n optimizer.step()\n\n # Save model\n torch.save(net.state_dict(), \"./mnist_net.pth\")\n print(\"Model Saved\")" ]
[ [ "torch.nn.Linear", "torch.nn.functional.nll_loss", "torch.utils.data.DataLoader", "torch.nn.functional.log_softmax" ] ]
emilydolson/interpreting_the_tape_of_life
[ "48fd46f3cd2a7f07ce4ea1fd6fa120b520e34925" ]
[ "analysis/cec_python_library/extract_lineage_fitness.py" ]
[ "#!/home/emily/anaconda2/bin/python\n###############################################################################\n# Version: 1.1\n# Last modified on: 3 April, 2016 \n# Developers: Michael G. Epitropakis\n# email: m_(DOT)_epitropakis_(AT)_lancaster_(DOT)_ac_(DOT)_uk \n###############################################################################\nfrom cec2013.cec2013 import *\nimport numpy as np\nimport sys\nimport pandas as pd\n\ndef main():\n\n\tproblem = int(sys.argv[1])\n\tfilename = sys.argv[2]\n\n\t# Create function\n\tf = CEC2013(problem)\n\tassert(f.get_dimension() == 2)\n\n\tdf = pd.read_csv(filename)\n\tdf.rename(str.strip, axis=\"columns\", inplace=True)\n\t# print(df.columns.values)\n\tgenotypes = [i.strip(\"[] \").split() for i in df[\"info\"]]\n\tgenotypes = map(lambda x: [float(x[0]), float(x[1])], genotypes)\n\tdf[\"fitness\"] = [f.evaluate(np.array(x)) for x in genotypes]\n\tgenotypes = zip(*genotypes)\n\n\tdf[\"x\"] = genotypes[0]\n\tdf[\"y\"] = genotypes[1]\n\n\tprint(df)\n\t# Evaluate :-)\n\t# x = np.ones(2)\n\t# value = f.evaluate(x)\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
JackWBoynton/ecg-augmentation
[ "7590c510231e387d77908e2e2b418f83cb2ae621" ]
[ "ecgaugmentation/augment.py" ]
[ "from scipy import signal\nfrom scipy import stats\nimport numpy as np\nfrom pylab import rcParams\nfrom glob import glob\nimport random as rd\nimport sys\nimport math\nimport matplotlib.pyplot as plt\n\nNUM_PEAKS = 5\n\nVARIANCE_MIN = 0.45\nVARIANCE_MAX = 0.5 # for smoothing\n\nAUGMENT_MAX = 1.5 # max peak multiplier\nAUGMENT_MIN = 0.5\n\nAUGMENT_STEP = 0.01 # increase to get less \"random\" segments\n\nSIMILAR_MAX = 20 # +- 20 samples to search for similar peak across different channels\n\nSMOOTHING_FUNCTION = lambda x,y,z: z*(((1/((y)*math.sqrt(2*math.pi)))*math.e**-(x**2/(2*(y**2))))) #x data, y variance, z sign\n\nCHANNELS = list(range(12))\n\nclass Augment:\n def __init__(self, annotations=None, data=None, current_ind=0, path=None, use_path=True, anomaly_type=None):\n self.annotation_mapping = np.load(\"definitions.npy\", allow_pickle=True) # what annotations mean\n if use_path and path is not None and current_ind == 0 and data is None and annotations is None and anomaly_type is not None:\n self._load_all(path, anomaly_type)\n elif data is not None and annotations is not None and current_ind is not None and anomaly_type is not None:\n self._load_ind(data, annotations, current_ind, anomaly_type)\n else:\n print(\"check params, either give a path name or an index\")\n sys.exit(0)\n\n def augment_random(self, anomaly_type: str) -> tuple:\n \"\"\"augments a random index from path data\n\n Args:\n anomaly_type (str): anomaly type to augment\n\n Returns:\n tuple: \n \"\"\"\n assert self.type == \"path\"\n\n self.data = np.array(rd.choice(self.data_)) # self.data_ contains tons of segments from all patients... therefore need singular patient\n self.baseline = [self.find_baseline(channel) for channel in CHANNELS]\n self.init_peaks_indices = [self.find_peaks(channel) for channel in CHANNELS]\n self.init_peaks_seg = [[self.find_whole_peak(peak=i, channel=channel, ind=None)[1] for i in self.init_peaks_indices[channel]] for channel in CHANNELS] # get segment values for each of the found peaks for channel CH\n self.init_peaks_ranges = [[self.find_whole_peak(peak=i, channel=channel)[0] for i in self.init_peaks_indices[channel]] for channel in CHANNELS] # get segment values for each of the found peaks for channel CH\n self.get_similar_timed()\n return self.run_augment(anomaly_type)\n\n def augment_ind(self, ind: int, anomaly_type: str) -> tuple:\n \"\"\"augments and returns a singular index of the data\n\n Args:\n ind (int): data index to augment\n anomaly_type (str): anomaly type to augment\n\n Returns:\n tuple:\n \"\"\"\n assert self.type == 'ind'\n\n return self.run_augment(anomaly_type, ind=ind)\n\n\n def _load_ind(self, data: np.ndarray, annotations: np.ndarray, current_ind: int, anomaly_type: str) -> None:\n \"\"\"load data from a given index only\n\n Args:\n data (np.ndarray): ECG data\n annotations (np.ndarray): numpy array of the annotations that correspond to the current index\n current_ind (int): the current index to load\n anomaly_type (str): anomaly type to load\n \"\"\"\n self.type = 'ind'\n self.data = data\n self.annotations = annotations\n self.curr = current_ind\n self.init_peaks_indices = [self.find_peaks(channel,ind=current_ind) for channel in CHANNELS]\n self.init_peaks_seg = [[self.find_whole_peak(peak=i, channel=channel, ind=current_ind)[1] for i in self.init_peaks_indices[channel]] for channel in CHANNELS] # get segment values for each of the found peaks for channel CH\n self.init_peaks_ranges = [[self.find_whole_peak(peak=i, channel=channel, ind=current_ind)[0] for i in 
self.init_peaks_indices[channel]] for channel in CHANNELS] # get segment values for each of the found peaks for channel CH\n self.baseline = [self.find_baseline(channel, ind=current_ind) for channel in CHANNELS]\n \n \n def _load_all(self, path: str, anomaly_type: str) -> None:\n \"\"\"load all ECG data from path\n\n Args:\n path (str): path to load .npy files from\n anomaly_type (str): anomaly type to load\n \"\"\"\n self.type = 'path'\n self.data_ = []\n filenames = glob(f\"{path}/*-fecg.npy\")\n #print(self.annotation_mapping)\n data, data_ann, order = [], [], []\n for filename in filenames:\n #print(filename)\n tmp = np.load(filename)\n annotations = (np.load(filename.split(\"-\")[0] + \"-ann.npy\"))\n for n,i in enumerate(tmp):\n if annotations[n] != 1 and self.annotation_mapping[annotations[n][0]][0] == anomaly_type:\n data.append(i) # dont care about the index anymore just need raw data that is of type anomaly_type\n self.data_ = np.array(data)\n assert len(self.data_) != 0\n \n\n def find_baseline(self, channel: int, ind: int=None) -> list:\n \"\"\"finds the baseline of a signal at channel\n\n Args:\n channel (int): channel of data to find baseline of\n ind (int, optional): index of data to look at. Defaults to None.\n\n Returns:\n list: list of baseline the same length of the input data segment for comparison\n \"\"\"\n if ind is None:\n nans = list(filter(lambda x: np.isnan(x), self.data[:,channel]))\n return [np.nanmean(self.data[:,channel])]*(len(self.data[:,channel])-len(nans))\n else:\n nans = list(filter(lambda x: np.isnan(x), self.data[ind][:,channel]))\n return [np.nanmean(self.data[ind][:,channel])]*(len(self.data[ind][:,channel])-len(nans))\n\n def get_anomalous_types(self) -> list:\n \"\"\"returns the list of anomalies in the current dataset\n\n Returns:\n [type]: list of anomaly types\n \"\"\"\n types = []\n for n,i in enumerate(self.data):\n if self.annotations[n] != 1:\n types.append((self.annotation_mapping[self.annotations[n][0]][0], n)) \n return (types)\n\n def get_anomalous_type(self, ind: int) -> str:\n \"\"\"returns the anomalous type at index ind\n\n Args:\n ind (int): index in dataset\n\n Returns:\n str: anomalous type\n \"\"\"\n return self.annotation_mapping[self.annotations[ind][0]][0]\n\n def get_similar_timed(self) -> None:\n \"\"\"tries to find peaks in different channels that are at about the same \"time\"\n \"\"\"\n self.similars = [[[]]for _ in range(NUM_PEAKS)]\n comp = [[[]for channel in range(len(CHANNELS))]for _ in range(NUM_PEAKS)]\n \n for n,x in enumerate(self.init_peaks_indices): # 12\n for peak in range(len(x)): # 5\n comp[peak][n] = x[peak]\n\n \n for m,x in enumerate(comp): # for each peak detected\n compare = x[0] # compare against the first channel lead for each peak that is detected\n self.similars[m] = [[0,compare]] # set initial\n for channel in range(1,len(CHANNELS[1:])+1):\n for stretch in range(0,SIMILAR_MAX):\n if x[channel] + stretch == compare or x[channel] - stretch == compare:\n # similar peak to compare\n if self.similars[m] == [[]]:\n self.similars[m] = [[channel,x[channel]]]\n else:\n self.similars[m].append([channel,x[channel]])\n break\n \n #[[[0, 80], [2, 79], [4, 90], [5, 65], [6, 66], [7, 72], [8, 78], [11, 81]], [[0, 90], [2, 90], [3, 79], [4, 96], [5, 79], [6, 82], [7, 85], [8, 88], [9, 79], [10, 80], [11, 90]], [[0, 110], [2, 96], [4, 110], [7, 96], [8, 95], [11, 96]], [[0, 138], [2, 138], [4, 138], [11, 138]], [[0, 143], [2, 142], [3, 134], [4, 143], [5, 138], [6, 138], [7, 142], [8, 138], [9, 138], [10, 138], 
[11, 162]]]\n \n def _flip(self, data: np.ndarray, sign: int) -> list:\n \"\"\"flip data about the baseline instead of about 0 (makes peak detection for both positive and negative peaks more reliable)\n\n Args:\n data (np.ndarray): in np.ndarray to flip\n sign (int): sign to flip about\n\n Returns:\n list: flipped data\n \"\"\"\n data = np.copy(data)\n out = []\n for x in data:\n if sign == -1:\n if x > np.nanmean(data):\n #out.append(np.nanmean(data))\n out.append(x)\n elif x < np.nanmean(data):\n out.append(np.nanmean(data)+(np.nanmean(data)-x))\n else:\n out.append(x)\n else:\n if x < np.nanmean(data):\n #out.append(np.nanmean(data))\n out.append(np.nan)\n elif x > np.nanmean(data):\n out.append(np.nanmean(data)-(np.nanmean(data)-x))\n else:\n out.append(x)\n assert len(out) == len(data)\n return out\n \n def find_peaks(self, channel: int, ind: int=None, override: int=0) -> list:\n \"\"\"find the peaks of an ECG segment using scipy find_peaks\n\n Args:\n channel (int): channel to search for peaks in\n ind (int, optional): index of data. Defaults to None.\n override (int, optional): [description]. Defaults to 0.\n\n Returns:\n list: [description]\n \"\"\"\n data = self._flip(self.data[ind][:,channel],-1) if ind is not None else self._flip(self.data[:,channel],-1)\n \n peaks, _ = signal.find_peaks(data, height=np.nanmean(self.data[ind][:,channel])+0.01-override) if ind is not None else signal.find_peaks(data, height=np.nanmean(self.data[:,channel])+0.01-override)\n #plt.scatter(peaks, self.data[ind][:,channel][peaks])\n #plt.show()\n proms = signal.peak_prominences(data, peaks=peaks)[0]\n collect = list(zip(proms,peaks))\n collect = sorted(collect, key=lambda x: x[0])\n collect = [c[1] for c in collect]\n collect_indices = collect\n start = 5\n if len(collect_indices) == NUM_PEAKS:\n return sorted(collect_indices)\n elif len(collect_indices) > NUM_PEAKS:\n s = collect\n \n return s[-NUM_PEAKS:]\n else:\n if override - 0.001 > 0:\n self.find_peaks(ind, channel, override=override-0.001)\n else:\n for x in range(5-len(collect_indices)):\n collect_indices.append(0)\n #print('error, did not find enough peaks... maybe change NUM_PEAKS parameter for this anomaly type?')\n #sys.exit(0)\n return sorted(collect_indices)\n\n\n def find_whole_peak(self, peak: int, channel: int, ind: int=None) -> list:\n \"\"\"finds the entire length of a peak, rather than just the exact index of peak. searches for baseline crosses to the left and right of peaks found by find_peaks().\n\n Args:\n peak (int): which peak currently looking at\n channel (int): channel of the data\n ind (int, optional): index of data. 
Defaults to None.\n\n Returns:\n list: list of peak ranges\n \"\"\"\n \n baseline = self.find_baseline(channel, ind)\n left, right = 0, 1\n #print(self.data[:,channel], peak, self.data[:,channel][peak])\n if ind is not None:\n if (self.data[ind][:,channel][peak]) > baseline[0]:\n for i in range(peak, len(self.data[ind][:,channel])):\n if self.data[ind][:,channel][i] <= baseline[0]:\n right = i\n break\n for i in range(peak, 0, -1):\n if self.data[ind][:,channel][i] <= baseline[0]:\n left = i\n break\n else:\n for i in range(peak, len(self.data[ind][:,channel])):\n if self.data[ind][:,channel][i] >= baseline[0]:\n right = i\n break\n for i in range(peak, 0, -1):\n if self.data[ind][:,channel][i] >= baseline[0]:\n left = i\n break\n \n if right == 1:\n right = len(self.data[ind][:,channel]) - 1\n if right < left:\n right = len(self.data[ind][:,channel]) - 1\n return list(range(left, right)), self.data[ind][:,channel][left:right]\n else:\n #print(baseline[0])\n #print(self.data[:,channel][peak] > baseline[0])\n if (self.data[:,channel][peak]) > baseline[0]:\n for i in range(peak, len(self.data[:,channel])):\n if self.data[:,channel][i] <= baseline[0]:\n right = i\n break\n for i in range(peak, 0, -1):\n if self.data[:,channel][i] <= baseline[0]:\n left = i\n break\n else:\n for i in range(peak, len(self.data[:,channel])):\n if self.data[:,channel][i] >= baseline[0]:\n right = i\n break\n for i in range(peak, 0, -1):\n if self.data[:,channel][i] >= baseline[0]:\n left = i\n break\n \n if right == 1:\n right = len(self.data[:,channel]) - 1\n if right < left:\n right = len(self.data[:,channel]) - 1\n left = left - 2\n return list(range(left, right)), self.data[:,channel][left:right]\n\n def get_data(self, channel, ind):\n return self.data[ind][:,channel]\n\n def run_augment(self, anomaly_type: str, ind: int=None, use_noise: bool=False) -> tuple:\n \"\"\"Main augmentation program\n\n Args:\n anomaly_type (str): string for the anomaly type\n ind (int, optional): index of data. Defaults to None.\n use_noise (bool, optional): add noise to signal?. 
Defaults to False.\n\n Returns:\n tuple: augmented data\n \"\"\"\n try:\n self.get_similar_timed()\n chosen_factors = []\n factorss = []\n rem_factors = [None for _ in range(5)]\n factors_dict = {}\n for m,peak_ in enumerate(self.similars):\n chosen_factor = rd.choice(np.arange(AUGMENT_MIN, AUGMENT_MAX, AUGMENT_STEP))\n #chosen_factor = AUGMENT_MAX\n for channel in range(len(CHANNELS)):\n try:\n index = list(filter(lambda x: x[0] == channel, peak_))[0][1]\n except:\n print('arb')\n index = self.init_peaks_indices[channel][m]\n factors_dict[(m,channel)] = round(rd.choice(np.arange(AUGMENT_MIN, AUGMENT_MAX, AUGMENT_STEP)),3) # choose arb factor\n #factors_dict[(m,channel)] = round(AUGMENT_MAX,3)\n else:\n factors_dict[(m,channel)] = round(chosen_factor,3) # set all similar to same value (peak, channel, index)\n assert len(factors_dict) == NUM_PEAKS * len(CHANNELS)\n \n out = np.copy(self.data[ind]) if ind is not None else np.copy(self.data) # format output and only apply scaling on peaks\n np.save(\"orig.npy\", self.data[ind])\n augmented_ = []\n augmented__ = [[] for _ in range(len(CHANNELS))]\n for augmented in range(1):\n num_to_augment = rd.randint(2,NUM_PEAKS-1) # choose a random number of peaks to augment\n #num_to_augment = 5\n peaks_ = list(range(NUM_PEAKS))\n \n for peak in range(num_to_augment):\n factorss.append([])\n chosen_factors.append([])\n choice_to_augment = rd.choice(peaks_)\n augmented_.append(choice_to_augment)\n del peaks_[peaks_.index(choice_to_augment)] # don't apply augmentation to same peak more than once\n for channel in CHANNELS:\n chosen_factors[peak].append([])\n factorss[peak].append([])\n signn = -1 if self.data[:,channel][self.init_peaks_indices[channel][peak]] < self.baseline[channel][0] else 1\n\n variance = rd.choice(np.arange(VARIANCE_MIN,VARIANCE_MAX,0.01))\n\n chosen_factors[peak][channel].append(chosen_factor)\n \n range_ = abs(np.linspace(0, math.sqrt(abs(2*math.log(1/(variance))-math.log(2*math.pi)))*variance, num=len(self.init_peaks_ranges[channel][choice_to_augment]))) # smoothing function reaches 0 at about +- 1.6\n \n factors = []\n augmented__[channel].append(chosen_factor)\n\n for g in range(len(self.init_peaks_ranges[channel][choice_to_augment])):\n factors.append(abs(SMOOTHING_FUNCTION(range_[g],variance,signn))) # apply smoothing function\n factorss[peak][channel].append(factors)\n chosen_factor = factors_dict[(choice_to_augment, channel)]\n bot = min(self.init_peaks_ranges[channel][choice_to_augment])\n top = max(self.init_peaks_ranges[channel][choice_to_augment])\n for n,h in enumerate(range(bot, top+1)):\n if 1==1:\n if self.init_peaks_seg[channel][choice_to_augment][n] < self.baseline[channel][0]: # if negative peak subtract changes\n if self.baseline[channel][0] < 0:\n out[:,channel][h] = -abs(self.baseline[channel][0] - self.init_peaks_seg[channel][choice_to_augment][n])*chosen_factor*factors[n]+abs((self.baseline[channel][0]-self.init_peaks_seg[channel][choice_to_augment][n]))-abs(self.init_peaks_seg[channel][choice_to_augment][n])\n else:\n out[:,channel][h] = -(abs(self.baseline[channel][0] - self.init_peaks_seg[channel][choice_to_augment][n])*chosen_factor*factors[n]-self.baseline[channel][0])\n elif self.init_peaks_seg[channel][choice_to_augment][n] > self.baseline[channel][0]: # if positive peak add change\n out[:,channel][h] = abs(self.init_peaks_seg[channel][choice_to_augment][n]-self.baseline[channel][0])*chosen_factor*factors[n]+self.baseline[channel][0]\n return out, list(map(lambda x: 1 if x in augmented_ else 0, 
list(range(NUM_PEAKS)))), augmented__, factorss, chosen_factors\n except Exception as e:\n return self.augment_random(anomaly_type) # min arg error\n" ]
[ [ "numpy.isnan", "numpy.arange", "numpy.save", "scipy.signal.peak_prominences", "numpy.copy", "numpy.nanmean", "numpy.load", "numpy.array" ] ]
KuangjuX/Algorithm-Lab
[ "02fb617b7639185120b08edf9826ecc106118f55" ]
[ "Test.py" ]
[ "import matplotlib.pyplot as plt\nimport os\nimport random\nimport numpy as np\n\ndef generateData(filePath,fileName):\n file = str(filePath + '/' +fileName)\n with open(file ,'w+') as f:\n for i in range(10,100):\n flag = []\n up = random.randint(10,10000)\n f.write(str(i)+'\\n')\n f.write(str(up)+'\\n')\n for j in range(i):\n element = random.randint(0,up)\n # element = (int)(random.random()*up)\n while element in flag:\n element = random.randint(0,up)\n # element = (int)(random.random()*up)\n\n flag.append(element)\n f.write(str(element)+' ')\n f.write('\\n') \n\n# def execAlgorithm(exePath):\n# if os.path.exists(exePath):\n\ndef dataVisual(path):\n data = np.loadtxt(path)\n x, _ = data.shape\n plt.plot([data[i][0] for i in range(x)], [data[i][1] for i in range(x)])\n plt.show()\n \n\n\ndef main():\n # filePath = 'testcases'\n # fileName = 'example.txt'\n # generateData(filePath,fileName)\n # exePath = '.\\src\\\\main.exe'\n # execAlgorithm(exePath)\n # os.system(exePath)\n path = 'testcases/time.txt'\n dataVisual(path)\n\nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.pyplot.show", "numpy.loadtxt" ] ]
heyamrita/Python
[ "ebfd55d0c21fd50dc3376030ec90c1c67b5df48f" ]
[ "Face_Mask_detection (haarcascade)/mask_detection.py" ]
[ "import tensorflow.keras\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport cv2\n\nstr = ''\nfaceCascade= cv2.CascadeClassifier(\"Resources/haarcascade_frontalface_default.xml\")\n\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('Resources/keras_model.h5')\ndata = np.ndarray(shape=(1, 224,224, 3), dtype=np.float32)\n\n\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\nwhile True:\n success,img = cap.read()\n \n cv2.imshow(\"webcam\", img)\n faces = faceCascade.detectMultiScale(img,1.1,4)\n \n \n for (x,y,w,h) in faces:\n crop_img = img[y:y+h, x:x+w]\n crop_img = cv2.resize(crop_img,(224,224))\n normalized_image_array = (crop_img.astype(np.float32) / 127.0) - 1 \n data[0] = normalized_image_array \n prediction = model.predict(data)\n print(prediction)\n if prediction[0][0] > prediction[0][1]:\n str = 'Mask'\n else:\n str = 'Without-mask'\n\n\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n cv2.putText(img,str,(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,150,0),1)\n cv2.imshow(\"Result\", img)\n\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n" ]
[ [ "numpy.set_printoptions", "numpy.ndarray" ] ]
DSciLab/mlutils
[ "352af36f2b34218b6551254f641427b7bbdd0f31" ]
[ "mlutils/inspector.py" ]
[ "import cv2\nimport numpy as np\nimport torch\n\n\nclass ModuleNode(object):\n def __init__(self, name) -> None:\n super().__init__()\n self.name = name\n self.children = {}\n \n def remove_child(self, name):\n del self.children[name]\n\n @property\n def empty(self):\n return self.isleaf\n\n @property\n def isleaf(self):\n return len(self.children) == 0\n\n def has_child(self, name):\n return name in self.children\n\n def append_child(self, child):\n self.children[child.name] = child\n\n def get_child(self, name):\n return self.children[name]\n\n def __str__(self):\n root = 'root'\n return (f'{self.name or root}, '\n f'number of children {len(self.children)}')\n\n\nclass ModuleForest(ModuleNode):\n def __init__(self, name=None) -> None:\n super().__init__(name)\n self.children = {}\n\n def add_layer(self, layer_name):\n splited_name = layer_name.split('.')\n node = self\n for name in splited_name:\n if node.has_child(name):\n node = node.get_child(name)\n else:\n new_node = ModuleNode(name)\n node.append_child(new_node)\n node = new_node\n\n\ndef one_hot(inp, num_classes):\n output = torch.zeros(\n (inp.shape[0], num_classes, *inp.shape[1:])\n ).to(inp.device)\n output.scatter_(1, inp.unsqueeze(1), 1)\n return output\n\n\nclass Inspector(object):\n '''\n >>> # init\n >>> model = Model()\n >>> inspector = Inspector(model)\n >>> inspector.regist_layers(['35'])\n >>> inspector.regist_loss_fn(loss_fn)\n >>> # run\n >>> inspector.inspect(images, labels)\n >>> cams = inspector.show_cam_on_images()\n '''\n def __init__(self, opt, model) -> None:\n super().__init__()\n self.opt = opt\n self.model = model\n self.one_hot = one_hot\n self.regist_module_forest = ModuleForest()\n self.gradients = []\n self.features = []\n self.cams = []\n self.image = None\n self.training = False\n\n def regist_layers(self, *layers):\n for layer in layers:\n self.regist_module_forest.add_layer(layer)\n\n def regist_one_hot(self, one_hot):\n self.one_hot = one_hot\n\n def save_gradient(self, grad):\n self.gradients.append(grad)\n\n def inspect(self, x):\n assert not self.regist_module_forest.empty, \\\n 'No layer has been registed yet.'\n assert self.one_hot is not None, \\\n 'one_hot function is None'\n if self.model.training:\n self.training = True\n self.model.eval()\n self.model.zero_grad()\n self.image = x\n self.features = []\n self.cams = []\n self.gradients = []\n\n def exec(module, node, x):\n \"\"\"\n x: single image\n \"\"\"\n if node.isleaf:\n x = module(x)\n x.register_hook(self.save_gradient)\n self.features += [x]\n return x\n\n for name, m in module.named_children():\n # print('>>', name)\n if not node.has_child(name):\n # TODO fix dirty code\n if name in ['classifier', 'fc'] and x.ndim > 2:\n x = x.view(x.size(0), -1)\n x = m(x)\n else:\n child_node = node.get_child(name)\n x = exec(m, child_node, x)\n return x\n\n x = exec(self.model, self.regist_module_forest, x)\n\n num_classes = x.size(1)\n if num_classes == 1:\n loss = x[x>0].sum() + x[x<=0].sum() * -1\n else:\n y = torch.argmax(x, dim=1)\n y = self.one_hot(inp=y, num_classes=num_classes)\n loss = torch.sum(x * y)\n loss.backward(retain_graph=True)\n\n for feat, grad in zip(self.features, self.gradients):\n # get first image of the batch\n feat = feat[0]\n grad = grad[0]\n self.image = self.image[0].detach()\n\n cam = torch.zeros(feat.shape[-2:], device=x.device)\n weights = torch.mean(grad, dim=(1, 2))\n for i, w in enumerate(weights):\n # print('feat', feat[i].device)\n # print('w', w.device)\n cam += feat[i] * w\n self.cams.append(cam)\n\n if 
self.training:\n self.model.train()\n\n def show_cam_on_images(self, strength=1.0):\n assert len(self.cams) > 0, 'NO cam to show.'\n outputs = []\n image = self.image\n\n if isinstance(image, torch.Tensor):\n image = image.cpu().numpy()\n\n for cam in self.cams:\n if isinstance(cam, torch.Tensor):\n cam = cam.detach().cpu().numpy()\n\n # adjust cam to 0~1\n cam = np.maximum(cam, 0)\n cam = cv2.resize(cam, image.shape[-2:])\n cam = cam - cam.min()\n cam = cam / cam.max()\n heatmap = cv2.applyColorMap(\n np.uint8(255 * cam), cv2.COLORMAP_JET)\n heatmap = np.float32(heatmap) / 255.\n # image = (image - image.min()) / (image.max() - image.min())\n cam_image = strength * heatmap.transpose((2, 0, 1))[::-1, :, :] \\\n + np.float32(image)\n cam_image = cam_image / cam_image.max()\n outputs.append(cam_image)\n\n self.cams = []\n self.features = []\n self.gradients = []\n return outputs\n" ]
[ [ "torch.mean", "numpy.maximum", "torch.zeros", "numpy.uint8", "torch.sum", "numpy.float32", "torch.argmax" ] ]
jjcordano/elkans_kmeans
[ "21ac72896cdf2a54b951cb06fb45575befdc8dc7" ]
[ "kmeans.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\n\nclass Kmeans:\n def __init__(self, k=2, max_iter=250, tolerance=0, method='Elkan'):\n \"\"\"\n Initialises a Kmeans clustering algorithm.\n \n Args:\n - k: Number of clusters. Defaults to 2.\n - max_iter: Maximum number of iterations to be performed before the Kmeans algorithm terminates. Defaults to 250.\n - tolerance: Threshold distance change for each centroid to terminate algorithm. Defaults to 0.\n When the distance rate of change for each centroid between 2 subsequent iterations is lower than the tolerance, the algorithm terminates.\n - method: 'classic' or 'Elkan'. Determines whether the classic Kmeans or Elkan's accelerated Kmeans algorithm will be used. Defaults to 'Elkan'.\n \"\"\"\n \n assert method in ['classic','Elkan'], \"Method argument not valid\"\n \n self.k = k\n self.max_iter = max_iter\n self.tol = tolerance\n self.method = method\n \n def fit(self, data):\n '''\n Finds k centroids for a dataset of numeric points.\n \n Args:\n - data: Numpy array or pandas DataFrame of numerical values.\n '''\n pointsArray = np.array(data)\n\n ## Initializing k random centroids within the bounds of the data points\n self.centroids = {}\n self.labels = [0 for point in pointsArray]\n initCentroids = []\n\n for dim in range(pointsArray.shape[1]):\n dim_min = np.min(pointsArray, axis=0)[dim]\n dim_max = np.max(pointsArray, axis=0)[dim]\n newCentroid = (dim_max-dim_min)*np.random.random_sample([1,self.k])+dim_min\n initCentroids = np.append(initCentroids,newCentroid)\n\n initCentroids = initCentroids.reshape((pointsArray.shape[1],self.k)).T\n\n self.centroids = dict(zip(list(range(self.k)),initCentroids))\n \n ## Classic Kmeans\n if self.method == 'classic':\n for i in range(self.max_iter):\n self.classifications = {}\n self.pointsClassif = {}\n\n for i in range(self.k):\n self.classifications[i] = []\n self.pointsClassif[i] = []\n\n for point in pointsArray:\n distances = [np.linalg.norm(point-self.centroids[centroid]) for centroid in self.centroids]\n classification = distances.index(min(distances))\n self.classifications[classification].append(point)\n\n prevCentroids = dict(self.centroids)\n\n for classification in self.classifications:\n if len(self.classifications[classification]) == 0:\n pass\n\n else:\n self.centroids[classification] = np.average(self.classifications[classification],axis=0)\n\n optimized = [True for centroid in self.centroids]\n\n for centroid in self.centroids:\n original_centroid = prevCentroids[centroid]\n current_centroid = self.centroids[centroid]\n if abs(np.sum((current_centroid-original_centroid)/original_centroid*100.0)) > self.tol :\n optimized[centroid] = False\n\n if False not in optimized:\n for i in range(pointsArray.shape[0]):\n point = pointsArray[i]\n distances = [np.linalg.norm(point-self.centroids[centroid]) for centroid in self.centroids]\n classification = distances.index(min(distances))\n self.pointsClassif[classification].append(i)\n \n break\n \n ## Accelerated (Elkan) Kmeans\n elif self.method == 'Elkan':\n \n self.classifications = {} ## Points coordinates by centroid\n self.pointsClassif = {} ## Points indices by centroid\n \n lowerBounds = np.zeros((pointsArray.shape[0],self.k)) ## Lower bounds matrix. 
Dimensions : (nb_points, nb_centroids)\n upperBounds = np.zeros((pointsArray.shape[0])) ## Upper bounds vector : Dimension : (nb_points)\n\n\n for i in range(self.k):\n self.classifications[i] = []\n self.pointsClassif[i] = []\n \n i=0\n for point in pointsArray:\n distances = [np.linalg.norm(point-self.centroids[centroid]) for centroid in self.centroids]\n classification = distances.index(min(distances))\n \n ## Centroid assigned to each point\n self.classifications[classification].append(point)\n self.pointsClassif[classification].append(i)\n \n ## Lower bound distance between the point and each center\n ## Initialized as distance between the point and each initial centroid\n lowerBounds[i] = distances\n \n ## Upper bound distance between the point and assigned centroid\n upperBounds[i] = min(distances)\n \n i+=1\n \n prevCentroids = dict(self.centroids.copy())\n prevClassifications = dict(self.classifications.copy())\n prevPointsClassif = dict(self.pointsClassif.copy())\n\n for classification in self.classifications:\n if len(self.classifications[classification]) == 0:\n pass\n\n else:\n self.centroids[classification] = np.average(self.classifications[classification],axis=0)\n\n optimized = [True for centroid in self.centroids]\n \n centroidDistanceChange = {}\n\n for centroid in self.centroids:\n original_centroid = prevCentroids[centroid]\n current_centroid = self.centroids[centroid]\n centroidDistanceChange[centroid] = np.linalg.norm(original_centroid-current_centroid)\n \n if abs(np.sum((current_centroid-original_centroid)/original_centroid*100.0)) > self.tol :\n optimized[centroid] = False\n\n if False not in optimized:\n \n for centroid in self.pointsClassif:\n for point in self.pointsClassif[centroid]:\n self.labels[point] = centroid\n return\n \n ## Update lower and upper bound distances\n for centroid in self.pointsClassif:\n for i in list(range(pointsArray.shape[0])):\n lowerBounds[i][centroid] -= centroidDistanceChange[centroid]\n \n for i in self.pointsClassif[centroid]:\n upperBounds[i] += centroidDistanceChange[centroid]\n \n ## Repeat until convergence \n for it in range(self.max_iter):\n \n listCentroids = list(range(self.k))\n self.classifications = {} ## Points coordinates by centroid\n self.pointsClassif = {} ## Points indices by centroid\n centroidDistances = {} ## Distances to other centroids for each centroid\n closestCentroidDistances = {} ## Distance to closest centroid for each centroid\n \n for i in range(self.k):\n self.classifications[i] = []\n self.pointsClassif[i] = []\n centroidDistances[i] = [np.linalg.norm(self.centroids[i]-self.centroids[c_prime]) for c_prime in self.centroids]\n closestCentroidDistances[i] = min(centroidDistances[i][:i]+centroidDistances[i][i+1:])\n \n for centroid in prevPointsClassif:\n for i in prevPointsClassif[centroid]:\n \n r = True\n distToCurrentCentroid = upperBounds[i]\n \n ## Check if upper bound lower than 1/2 of distance with closest centroid\n if upperBounds[i] <= 0.5*closestCentroidDistances[centroid]:\n ## If condition is met : said point keeps its centroid with no further computation needed\n self.classifications[centroid].append(pointsArray[i])\n self.pointsClassif[centroid].append(i)\n \n \n else:\n assigned_centroid = centroid\n for c_prime in (listCentroids[:centroid]+listCentroids[centroid+1:]):\n ## Check if lower bound between point and c_prime < upper bound between point and its current centroid\n ## AND if (0.5*distance between current centroid and c_prime) < upper bound between point and its current 
centroid\n if ((distToCurrentCentroid > lowerBounds[i][c_prime]) and (distToCurrentCentroid > 0.5*centroidDistances[centroid][c_prime])): \n if r:\n distToCurrentCentroid = np.linalg.norm(pointsArray[i] - self.centroids[centroid])\n r = False\n \n distToCPrime = np.linalg.norm(pointsArray[i]-self.centroids[c_prime])\n \n if distToCurrentCentroid > distToCPrime:\n assigned_centroid = c_prime \n \n self.classifications[assigned_centroid].append(pointsArray[i])\n self.pointsClassif[assigned_centroid].append(i)\n \n prevCentroids = dict(self.centroids.copy())\n prevClassifications = dict(self.classifications.copy())\n prevPointsClassif = dict(self.pointsClassif.copy())\n \n for classification in self.classifications:\n if len(self.classifications[classification]) == 0:\n pass\n\n else:\n self.centroids[classification] = np.average(self.classifications[classification],axis=0)\n\n optimized = [True for centroid in self.centroids]\n \n centroidDistanceChange = {}\n\n for centroid in self.centroids:\n original_centroid = prevCentroids[centroid]\n current_centroid = self.centroids[centroid]\n centroidDistanceChange[centroid] = np.linalg.norm(original_centroid-current_centroid)\n\n if abs(np.sum((current_centroid-original_centroid)/original_centroid*100.0)) > self.tol :\n optimized[centroid] = False\n\n if False not in optimized:\n break\n \n ## Update of lower and upper bound distances\n for centroid in self.pointsClassif:\n for i in list(range(pointsArray.shape[0])):\n lowerBounds[i][centroid] -= centroidDistanceChange[centroid]\n \n for i in self.pointsClassif[centroid]:\n upperBounds[i] += centroidDistanceChange[centroid]\n \n ## Update labels (cluster) for each point\n for centroid in self.pointsClassif:\n for point in self.pointsClassif[centroid]:\n self.labels[point] = centroid\n \n def predict(self,data):\n \"\"\"\n Assigns point(s) to cluster (or centroid).\n Clusters must have been previously computed using Kmeans.fit() method on dataset.\n \n Args:\n - data: 1-d or 2-d numpy array of points to be assigned to a cluster.\n \"\"\"\n \n data = np.array(data)\n \n assert data.flatten().shape[0] % self.centroids[self.labels[0]].shape[0] == 0, \"All inputs must be of shape ({},)\".format(self.centroids[self.labels[0]])\n \n if (data.flatten().shape[0] / self.centroids[self.labels[0]].shape[0]) == 1:\n distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]\n classification = distances.index(min(distances))\n \n return classification\n \n else:\n \n assert data.shape[1] == data.shape[-1] == self.centroids[self.labels[0]].shape[0], \"All inputs must be of shape ({},)\".format(self.centroids[self.labels[0]])\n \n classifications = []\n \n for point in data:\n distances = [np.linalg.norm(point - self.centroids[centroid]) for centroid in self.centroids]\n classification = distances.index(min(distances))\n classifications.append(classification)\n \n return classifications\n\n" ]
[ [ "numpy.min", "numpy.linalg.norm", "numpy.random.random_sample", "numpy.max", "numpy.append", "numpy.average", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
sdeneefe/tensor2tensor
[ "d9f807cf2738323d19aba0a20a8cf0c7f7da8b27" ]
[ "tensor2tensor/tpu/tpu_trainer_lib.py" ]
[ "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library for training on TPU. See tpu_trainer.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils import trainer_utils\n\nimport tensorflow as tf\n\n\ndef create_run_config(master=\"\",\n model_dir=None,\n iterations_per_loop=1000,\n num_shards=8,\n log_device_placement=False,\n save_checkpoints_steps=1000,\n num_gpus=1,\n gpu_order=\"\",\n shard_to_cpu=False,\n use_tpu=True):\n \"\"\"Create TPUConfig and tpu.RunConfig.\"\"\"\n session_config = tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=log_device_placement)\n run_config_args = {\n \"model_dir\": model_dir,\n \"session_config\": session_config,\n \"save_summary_steps\": 0,\n \"save_checkpoints_steps\": save_checkpoints_steps,\n }\n run_config_cls = tf.estimator.RunConfig\n\n # If using TPU, use TPU RunConfig, add TPUConfig, and add additional args\n if use_tpu:\n run_config_cls = tf.contrib.tpu.RunConfig\n tpu_config = tf.contrib.tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=num_shards,\n per_host_input_for_training=(num_shards <= 8))\n run_config_args[\"master\"] = master\n run_config_args[\"tpu_config\"] = tpu_config\n\n config = run_config_cls(**run_config_args)\n\n # If not using TPU, add device info for data_parallelism\n if not use_tpu:\n config.t2t_device_info = {\n \"num_gpus\": num_gpus,\n \"gpu_order\": gpu_order,\n \"shard_to_cpu\": shard_to_cpu,\n \"num_shards\": max(1, num_gpus + int(shard_to_cpu))\n }\n\n return config\n\n\ndef create_estimator(model_name, hparams, run_config, use_tpu=True):\n model_fn = t2t_model.T2TModel.make_estimator_model_fn(\n model_name, hparams, use_tpu=use_tpu)\n\n if use_tpu:\n batch_size = hparams.tpu_batch_size_per_shard\n batch_size *= run_config.tpu_config.num_shards\n return tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n model_dir=run_config.model_dir,\n config=run_config,\n train_batch_size=batch_size,\n eval_batch_size=batch_size * 2)\n else:\n return tf.estimator.Estimator(\n model_fn=model_fn, model_dir=run_config.model_dir, config=run_config)\n\n\ndef create_experiment(run_config,\n hparams,\n model_name,\n problem_name,\n data_dir,\n train_steps,\n eval_steps,\n min_eval_frequency,\n use_tpu=True):\n \"\"\"Create Experiment.\"\"\"\n # HParams\n hparams.add_hparam(\"data_dir\", data_dir)\n trainer_utils.add_problem_hparams(hparams, problem_name)\n\n # Estimator\n estimator = create_estimator(model_name, hparams, run_config, use_tpu=use_tpu)\n\n # Input fns from Problem\n problem = hparams.problem_instances[0]\n train_input_fn = problem.make_estimator_input_fn(\n tf.estimator.ModeKeys.TRAIN, hparams)\n eval_input_fn = problem.make_estimator_input_fn(\n tf.estimator.ModeKeys.EVAL, hparams)\n\n # Experiment\n return tf.contrib.learn.Experiment(\n 
estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n train_steps=train_steps,\n eval_steps=eval_steps,\n min_eval_frequency=min_eval_frequency,\n train_steps_per_iteration=min_eval_frequency)\n\n\ndef create_experiment_fn(*args, **kwargs):\n \"\"\"Wrapper for canonical experiment_fn. See create_experiment.\"\"\"\n\n def experiment_fn(run_config, hparams):\n return create_experiment(run_config, hparams, *args, **kwargs)\n\n return experiment_fn\n" ]
[ [ "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.estimator.Estimator", "tensorflow.ConfigProto", "tensorflow.contrib.learn.Experiment", "tensorflow.contrib.tpu.TPUConfig" ] ]
ElementAI/seq2struct
[ "06c6bd29a0b7d4db799c2fbbacc41ea30eadbecc" ]
[ "seq2struct/models/spider_enc_modules.py" ]
[ "import collections\nimport itertools\nimport operator\n\nimport numpy as np\nimport torch\nfrom torch import nn\nimport torchtext\n\ntry:\n from seq2struct.models import lstm\nexcept ImportError:\n pass\nfrom seq2struct.models import transformer\nfrom seq2struct.utils import batched_sequence\nfrom seq2struct.utils import registry\n\n\n\ndef clamp(value, abs_max):\n value = max(-abs_max, value)\n value = min(abs_max, value)\n return value\n\n\ndef get_attn_mask(seq_lengths):\n # Given seq_lengths like [3, 1, 2], this will produce\n # [[[1, 1, 1],\n # [1, 1, 1],\n # [1, 1, 1]],\n # [[1, 0, 0],\n # [0, 0, 0],\n # [0, 0, 0]],\n # [[1, 1, 0],\n # [1, 1, 0],\n # [0, 0, 0]]]\n # int(max(...)) so that it has type 'int instead of numpy.int64\n max_length, batch_size = int(max(seq_lengths)), len(seq_lengths)\n attn_mask = torch.LongTensor(batch_size, max_length, max_length).fill_(0)\n for batch_idx, seq_length in enumerate(seq_lengths):\n attn_mask[batch_idx, :seq_length, :seq_length] = 1\n return attn_mask\n\n\nclass LookupEmbeddings(torch.nn.Module):\n def __init__(self, device, vocab, embedder, emb_size):\n super().__init__()\n self._device = device\n self.vocab = vocab\n self.embedder = embedder\n self.emb_size = emb_size\n\n self.embedding = torch.nn.Embedding(\n num_embeddings=len(self.vocab),\n embedding_dim=emb_size)\n if self.embedder:\n assert emb_size == self.embedder.dim\n\n def forward_unbatched(self, token_lists):\n # token_lists: list of list of lists\n # [batch, num descs, desc length]\n # - each list contains tokens\n # - each list corresponds to a column name, table name, etc.\n\n embs = []\n for tokens in token_lists:\n # token_indices shape: batch (=1) x length\n token_indices = torch.tensor(\n self.vocab.indices(tokens), device=self._device).unsqueeze(0)\n\n # emb shape: batch (=1) x length x word_emb_size\n emb = self.embedding(token_indices)\n\n # emb shape: desc length x batch (=1) x word_emb_size\n emb = emb.transpose(0, 1)\n embs.append(emb)\n\n # all_embs shape: sum of desc lengths x batch (=1) x word_emb_size\n all_embs = torch.cat(embs, dim=0)\n\n # boundaries shape: num of descs + 1\n # If desc lengths are [2, 3, 4],\n # then boundaries is [0, 2, 5, 9]\n boundaries = np.cumsum([0] + [emb.shape[0] for emb in embs])\n\n return all_embs, boundaries\n \n def _compute_boundaries(self, token_lists):\n # token_lists: list of list of lists\n # [batch, num descs, desc length]\n # - each list contains tokens\n # - each list corresponds to a column name, table name, etc.\n boundaries = [\n np.cumsum([0] + [len(token_list) for token_list in token_lists_for_item])\n for token_lists_for_item in token_lists]\n\n return boundaries\n\n def _embed_token(self, token, batch_idx, out):\n if self.embedder:\n emb = self.embedder.lookup(token)\n else:\n emb = None\n if emb is None:\n emb = self.embedding.weight[self.vocab.index(token)]\n out.copy_(emb)\n\n def forward(self, token_lists):\n # token_lists: list of list of lists\n # [batch, num descs, desc length]\n # - each list contains tokens\n # - each list corresponds to a column name, table name, etc.\n # PackedSequencePlus, with shape: [batch, sum of desc lengths, emb_size]\n all_embs = batched_sequence.PackedSequencePlus.from_lists(\n lists=[\n [\n token\n for token_list in token_lists_for_item\n for token in token_list\n ]\n for token_lists_for_item in token_lists\n ],\n item_shape=(self.emb_size,),\n tensor_type=torch.FloatTensor,\n item_to_tensor=self._embed_token)\n all_embs = all_embs.apply(lambda d: d.to(self._device))\n \n 
return all_embs, self._compute_boundaries(token_lists)\n\n def _embed_words_learned(self, token_lists):\n # token_lists: list of list of lists\n # [batch, num descs, desc length]\n # - each list contains tokens\n # - each list corresponds to a column name, table name, etc.\n\n # PackedSequencePlus, with shape: [batch, num descs * desc length (sum of desc lengths)]\n indices = batched_sequence.PackedSequencePlus.from_lists(\n lists=[\n [\n token\n for token_list in token_lists_for_item\n for token in token_list\n ]\n for token_lists_for_item in token_lists\n ],\n item_shape=(1,), # For compatibility with old PyTorch versions\n tensor_type=torch.LongTensor,\n item_to_tensor=lambda token, batch_idx, out: out.fill_(self.vocab.index(token))\n )\n indices = indices.apply(lambda d: d.to(self._device))\n # PackedSequencePlus, with shape: [batch, sum of desc lengths, emb_size]\n all_embs = indices.apply(lambda x: self.embedding(x.squeeze(-1)))\n\n return all_embs, self._compute_boundaries(token_lists)\n\n\nclass EmbLinear(torch.nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.linear = torch.nn.Linear(input_size, output_size)\n \n def forward(self, input_):\n all_embs, boundaries = input_\n all_embs = all_embs.apply(lambda d: self.linear(d))\n return all_embs, boundaries\n\n\nclass BiLSTM(torch.nn.Module):\n def __init__(self, input_size, output_size, dropout, summarize, use_native=False):\n # input_size: dimensionality of input\n # output_size: dimensionality of output\n # dropout\n # summarize:\n # - True: return Tensor of 1 x batch x emb size \n # - False: return Tensor of seq len x batch x emb size \n super().__init__()\n\n if use_native:\n self.lstm = torch.nn.LSTM(\n input_size=input_size,\n hidden_size=output_size // 2,\n bidirectional=True,\n dropout=dropout)\n self.dropout = torch.nn.Dropout(dropout)\n else:\n self.lstm = lstm.LSTM(\n input_size=input_size,\n hidden_size=output_size // 2,\n bidirectional=True,\n dropout=dropout)\n self.summarize = summarize\n self.use_native = use_native\n\n def forward_unbatched(self, input_):\n # all_embs shape: sum of desc lengths x batch (=1) x input_size\n all_embs, boundaries = input_\n\n new_boundaries = [0]\n outputs = []\n for left, right in zip(boundaries, boundaries[1:]):\n # state shape:\n # - h: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2\n # - c: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2\n # output shape: seq len x batch size x output_size\n if self.use_native:\n inp = self.dropout(all_embs[left:right])\n output, (h, c) = self.lstm(inp)\n else:\n output, (h, c) = self.lstm(all_embs[left:right])\n if self.summarize:\n seq_emb = torch.cat((h[0], h[1]), dim=-1).unsqueeze(0)\n new_boundaries.append(new_boundaries[-1] + 1)\n else:\n seq_emb = output\n new_boundaries.append(new_boundaries[-1] + output.shape[0])\n outputs.append(seq_emb)\n\n return torch.cat(outputs, dim=0), new_boundaries\n\n def forward(self, input_):\n # all_embs shape: PackedSequencePlus with shape [batch, sum of desc lengths, input_size]\n # boundaries: list of lists with shape [batch, num descs + 1]\n all_embs, boundaries = input_\n\n # List of the following:\n # (batch_idx, desc_idx, length)\n desc_lengths = []\n batch_desc_to_flat_map = {}\n for batch_idx, boundaries_for_item in enumerate(boundaries):\n for desc_idx, (left, right) in enumerate(zip(boundaries_for_item, boundaries_for_item[1:])):\n desc_lengths.append((batch_idx, desc_idx, right - left))\n 
batch_desc_to_flat_map[batch_idx, desc_idx] = len(batch_desc_to_flat_map)\n\n # Recreate PackedSequencePlus into shape\n # [batch * num descs, desc length, input_size]\n # with name `rearranged_all_embs`\n remapped_ps_indices = []\n def rearranged_all_embs_map_index(desc_lengths_idx, seq_idx):\n batch_idx, desc_idx, _ = desc_lengths[desc_lengths_idx]\n return batch_idx, boundaries[batch_idx][desc_idx] + seq_idx\n def rearranged_all_embs_gather_from_indices(indices):\n batch_indices, seq_indices = zip(*indices)\n remapped_ps_indices[:] = all_embs.raw_index(batch_indices, seq_indices)\n return all_embs.ps.data[torch.LongTensor(remapped_ps_indices)]\n rearranged_all_embs = batched_sequence.PackedSequencePlus.from_gather(\n lengths=[length for _, _, length in desc_lengths],\n map_index=rearranged_all_embs_map_index,\n gather_from_indices=rearranged_all_embs_gather_from_indices)\n rev_remapped_ps_indices = tuple(\n x[0] for x in sorted(\n enumerate(remapped_ps_indices), key=operator.itemgetter(1)))\n\n # output shape: PackedSequence, [batch * num_descs, desc length, output_size]\n # state shape:\n # - h: [num_layers (=1) * num_directions (=2), batch, output_size / 2]\n # - c: [num_layers (=1) * num_directions (=2), batch, output_size / 2]\n if self.use_native:\n rearranged_all_embs = rearranged_all_embs.apply(self.dropout)\n output, (h, c) = self.lstm(rearranged_all_embs.ps)\n if self.summarize:\n # h shape: [batch * num descs, output_size]\n h = torch.cat((h[0], h[1]), dim=-1)\n\n # new_all_embs: PackedSequencePlus, [batch, num descs, input_size]\n new_all_embs = batched_sequence.PackedSequencePlus.from_gather(\n lengths=[len(boundaries_for_item) - 1 for boundaries_for_item in boundaries],\n map_index=lambda batch_idx, desc_idx: rearranged_all_embs.sort_to_orig[batch_desc_to_flat_map[batch_idx, desc_idx]],\n gather_from_indices=lambda indices: h[torch.LongTensor(indices)])\n\n new_boundaries = [\n list(range(len(boundaries_for_item)))\n for boundaries_for_item in boundaries\n ]\n else:\n new_all_embs = all_embs.apply(\n lambda _: output.data[torch.LongTensor(rev_remapped_ps_indices)])\n new_boundaries = boundaries\n\n return new_all_embs, new_boundaries\n\n\nclass RelationProvider:\n\n def compute_relation(self, desc, i_type, i_index, i_token_index, j_type, j_index, j_token_index):\n raise NotImplementedError\n\n @property\n def all_relation_types(self):\n raise NotImplementedError\n\n\nclass SchemaRelationProvider(RelationProvider):\n def __init__(self, \n merge_types=False,\n qq_max_dist=2,\n #qc_token_match=True,\n #qt_token_match=True,\n #cq_token_match=True,\n cc_foreign_key=True,\n cc_table_match=True,\n cc_max_dist=2,\n ct_foreign_key=True,\n ct_table_match=True,\n #tq_token_match=True,\n tc_table_match=True,\n tc_foreign_key=True,\n tt_max_dist=2,\n tt_foreign_key=True):\n\n self.qq_max_dist = qq_max_dist\n #self.qc_token_match = qc_token_match\n #self.qt_token_match = qt_token_match\n #self.cq_token_match = cq_token_match\n self.cc_foreign_key = cc_foreign_key\n self.cc_table_match = cc_table_match\n self.cc_max_dist = cc_max_dist\n self.ct_foreign_key = ct_foreign_key\n self.ct_table_match = ct_table_match\n #self.tq_token_match = tq_token_match\n self.tc_table_match = tc_table_match\n self.tc_foreign_key = tc_foreign_key\n self.tt_max_dist = tt_max_dist\n self.tt_foreign_key = tt_foreign_key\n\n self.relation_map = collections.OrderedDict()\n def add_relation(key):\n self.relation_map[key] = key\n def add_rel_dist(name, max_dist):\n for i in range(-max_dist, max_dist + 1):\n 
add_relation((name, i))\n\n add_rel_dist('qq_dist', qq_max_dist)\n\n add_relation('qc_default')\n #if qc_token_match:\n # add_relation('qc_token_match')\n\n add_relation('qt_default')\n #if qt_token_match:\n # add_relation('qt_token_match')\n\n add_relation('cq_default')\n #if cq_token_match:\n # add_relation('cq_token_match')\n\n add_relation('cc_default')\n if cc_foreign_key:\n add_relation('cc_foreign_key_forward')\n add_relation('cc_foreign_key_backward')\n if cc_table_match:\n add_relation('cc_table_match')\n add_rel_dist('cc_dist', cc_max_dist)\n\n add_relation('ct_default')\n if ct_foreign_key:\n add_relation('ct_foreign_key')\n if ct_table_match:\n add_relation('ct_primary_key')\n add_relation('ct_table_match')\n add_relation('ct_any_table')\n\n add_relation('tq_default')\n #if cq_token_match:\n # add_relation('tq_token_match')\n\n add_relation('tc_default')\n if tc_table_match:\n add_relation('tc_primary_key')\n add_relation('tc_table_match')\n add_relation('tc_any_table')\n if tc_foreign_key:\n add_relation('tc_foreign_key')\n\n add_relation('tt_default')\n if tt_foreign_key:\n add_relation('tt_foreign_key_forward')\n add_relation('tt_foreign_key_backward')\n add_relation('tt_foreign_key_both')\n add_rel_dist('tt_dist', tt_max_dist)\n\n if merge_types:\n assert not cc_foreign_key\n assert not cc_table_match\n assert not ct_foreign_key\n assert not ct_table_match\n assert not tc_foreign_key\n assert not tc_table_match\n assert not tt_foreign_key\n\n assert cc_max_dist == qq_max_dist\n assert tt_max_dist == qq_max_dist\n\n add_relation('xx_default')\n self.relation_map['qc_default'] = 'xx_default'\n self.relation_map['qt_default'] = 'xx_default'\n self.relation_map['cq_default'] = 'xx_default'\n self.relation_map['cc_default'] = 'xx_default'\n self.relation_map['ct_default'] = 'xx_default'\n self.relation_map['tc_default'] = 'xx_default'\n self.relation_map['tt_default'] = 'xx_default'\n\n for i in range(-qq_max_dist, qq_max_dist + 1):\n self.relation_map['cc_dist', i] = ('qq_dist', i)\n self.relation_map['tt_dist', i] = ('tt_dist', i)\n\n # Ordering of this is very important!\n self._all_relation_types = list(self.relation_map.keys())\n\n @property\n def all_relation_types(self):\n return self._all_relation_types\n\n # i/j_type: question/column/table\n # i/j_index: \n # - question: always 0\n # - column/table: index of the column/table within the schema\n # i/j_token_index: index for the token within the description for the item that the token belongs to\n def compute_relation(self, desc, i_type, i_index, i_token_index, j_type, j_index, j_token_index):\n result = None\n def set_relation(key):\n nonlocal result\n result = self.relation_map[key]\n\n if i_type == 'question':\n if j_type == 'question':\n set_relation(('qq_dist', clamp(j_token_index - i_token_index, self.qq_max_dist)))\n elif j_type == 'column':\n set_relation('qc_default')\n elif j_type == 'table':\n set_relation('qt_default')\n\n elif i_type == 'column':\n if j_type == 'question':\n set_relation('cq_default')\n elif j_type == 'column':\n col1, col2 = i_index, j_index\n if i_index == j_index:\n set_relation(('cc_dist', clamp(j_token_index - i_token_index, self.cc_max_dist)))\n else:\n set_relation('cc_default')\n if self.cc_foreign_key:\n if desc['foreign_keys'].get(str(col1)) == col2:\n set_relation('cc_foreign_key_forward')\n if desc['foreign_keys'].get(str(col2)) == col1:\n set_relation('cc_foreign_key_backward')\n if (self.cc_table_match and \n desc['column_to_table'][str(col1)] == 
desc['column_to_table'][str(col2)]):\n set_relation('cc_table_match')\n\n elif j_type == 'table':\n col, table = i_index, j_index\n set_relation('ct_default')\n if self.ct_foreign_key and self.match_foreign_key(desc, col, table):\n set_relation('ct_foreign_key')\n if self.ct_table_match:\n col_table = desc['column_to_table'][str(col)] \n if col_table == table:\n if col in desc['primary_keys']:\n set_relation('ct_primary_key')\n else:\n set_relation('ct_table_match')\n elif col_table is None:\n set_relation('ct_any_table')\n\n elif i_type == 'table':\n if j_type == 'question':\n set_relation('tq_default')\n elif j_type == 'column':\n table, col = i_index, j_index\n set_relation('tc_default')\n\n if self.tc_foreign_key and self.match_foreign_key(desc, col, table):\n set_relation('tc_foreign_key')\n if self.tc_table_match:\n col_table = desc['column_to_table'][str(col)] \n if col_table == table:\n if col in desc['primary_keys']:\n set_relation('tc_primary_key')\n else:\n set_relation('tc_table_match')\n elif col_table is None:\n set_relation('tc_any_table')\n elif j_type == 'table':\n table1, table2 = i_index, j_index\n if table1 == table2:\n set_relation(('tt_dist', clamp(j_token_index - i_token_index, self.tt_max_dist)))\n else:\n set_relation('tt_default')\n if self.tt_foreign_key:\n forward = table2 in desc['foreign_keys_tables'].get(str(table1), ())\n backward = table1 in desc['foreign_keys_tables'].get(str(table2), ())\n if forward and backward:\n set_relation('tt_foreign_key_both')\n elif forward:\n set_relation('tt_foreign_key_forward')\n elif backward:\n set_relation('tt_foreign_key_backward')\n\n return result\n\n @classmethod\n def match_foreign_key(cls, desc, col, table):\n foreign_key_for = desc['foreign_keys'].get(str(col))\n if foreign_key_for is None:\n return False\n\n foreign_table = desc['column_to_table'][str(foreign_key_for)]\n return desc['column_to_table'][str(col)] == foreign_table\n\nclass RelationalTransformerUpdate(torch.nn.Module):\n\n def __init__(self, device, num_layers, num_heads, hidden_size, \n tie_layers=False,\n ff_size=None,\n dropout=0.1,\n relation_providers=[\n {'name': 'schema'},\n ]):\n super().__init__()\n self._device = device\n\n registered_relation_providers = {\n 'schema': SchemaRelationProvider,\n }\n self.relation_providers = [\n registry.instantiate(\n registered_relation_providers[config['name']],\n config,\n unused_keys=('name',))\n for config in relation_providers\n ]\n self.relation_ids = {}\n\n for provider in self.relation_providers:\n for key in provider.all_relation_types:\n self.relation_ids[key] = len(self.relation_ids)\n\n if ff_size is None:\n ff_size = hidden_size * 4\n self.encoder = transformer.Encoder(\n lambda: transformer.EncoderLayer(\n hidden_size, \n transformer.MultiHeadedAttentionWithRelations(\n num_heads,\n hidden_size,\n dropout),\n transformer.PositionwiseFeedForward(\n hidden_size,\n ff_size,\n dropout),\n len(self.relation_ids),\n dropout),\n hidden_size,\n num_layers,\n tie_layers)\n \n def forward_unbatched(self, desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries):\n # enc shape: total len x batch (=1) x recurrent size\n enc = torch.cat((q_enc, c_enc, t_enc), dim=0)\n\n # enc shape: batch (=1) x total len x recurrent size\n enc = enc.transpose(0, 1)\n\n # Catalogue which things are where\n relations = self.compute_relations(\n desc,\n enc_length=enc.shape[1],\n q_enc_length=q_enc.shape[0],\n c_enc_length=c_enc.shape[0],\n c_boundaries=c_boundaries,\n t_boundaries=t_boundaries)\n\n relations_t = 
torch.tensor(relations, device=self._device)\n enc_new = self.encoder(enc, relations_t, mask=None)\n\n # Split updated_enc again\n c_base = q_enc.shape[0]\n t_base = q_enc.shape[0] + c_enc.shape[0]\n q_enc_new = enc_new[:, :c_base]\n c_enc_new = enc_new[:, c_base:t_base]\n t_enc_new = enc_new[:, t_base:]\n return q_enc_new, c_enc_new, t_enc_new\n\n def forward(self, descs, q_enc, c_enc, c_boundaries, t_enc, t_boundaries):\n # enc: PackedSequencePlus with shape [batch, total len, recurrent size]\n enc = batched_sequence.PackedSequencePlus.cat_seqs((q_enc, c_enc, t_enc))\n\n q_enc_lengths = list(q_enc.orig_lengths())\n c_enc_lengths = list(c_enc.orig_lengths())\n t_enc_lengths = list(t_enc.orig_lengths())\n enc_lengths = list(enc.orig_lengths())\n max_enc_length = max(enc_lengths)\n\n all_relations = []\n for batch_idx, desc in enumerate(descs):\n enc_length = enc_lengths[batch_idx]\n relations_for_item = self.compute_relations(\n desc,\n enc_length,\n q_enc_lengths[batch_idx],\n c_enc_lengths[batch_idx],\n c_boundaries[batch_idx],\n t_boundaries[batch_idx])\n all_relations.append(np.pad(relations_for_item, ((0, max_enc_length - enc_length),), 'constant'))\n relations_t = torch.from_numpy(np.stack(all_relations)).to(self._device)\n\n # mask shape: [batch, total len, total len]\n mask = get_attn_mask(enc_lengths).to(self._device)\n # enc_new: shape [batch, total len, recurrent size]\n enc_padded, _ = enc.pad(batch_first=True)\n enc_new = self.encoder(enc_padded, relations_t, mask=mask)\n\n # Split enc_new again\n def gather_from_enc_new(indices):\n batch_indices, seq_indices = zip(*indices)\n return enc_new[torch.LongTensor(batch_indices), torch.LongTensor(seq_indices)]\n\n q_enc_new = batched_sequence.PackedSequencePlus.from_gather(\n lengths=q_enc_lengths,\n map_index=lambda batch_idx, seq_idx: (batch_idx, seq_idx),\n gather_from_indices=gather_from_enc_new)\n c_enc_new = batched_sequence.PackedSequencePlus.from_gather(\n lengths=c_enc_lengths,\n map_index=lambda batch_idx, seq_idx: (batch_idx, q_enc_lengths[batch_idx] + seq_idx),\n gather_from_indices=gather_from_enc_new)\n t_enc_new = batched_sequence.PackedSequencePlus.from_gather(\n lengths=t_enc_lengths,\n map_index=lambda batch_idx, seq_idx: (batch_idx, q_enc_lengths[batch_idx] + c_enc_lengths[batch_idx] + seq_idx),\n gather_from_indices=gather_from_enc_new)\n return q_enc_new, c_enc_new, t_enc_new\n\n def compute_relations(self, desc, enc_length, q_enc_length, c_enc_length, c_boundaries, t_boundaries):\n # Catalogue which things are where\n loc_types = {}\n for i in range(q_enc_length):\n loc_types[i] = ('question', 0, i)\n\n c_base = q_enc_length\n for c_id, (c_start, c_end) in enumerate(zip(c_boundaries, c_boundaries[1:])):\n for i in range(c_start + c_base, c_end + c_base):\n loc_types[i] = ('column', c_id, i - c_start - c_base)\n t_base = q_enc_length + c_enc_length\n for t_id, (t_start, t_end) in enumerate(zip(t_boundaries, t_boundaries[1:])):\n for i in range(t_start + t_base, t_end + t_base):\n loc_types[i] = ('table', t_id, i - t_start - t_base)\n \n relations = np.empty((enc_length, enc_length), dtype=np.int64)\n\n for i, j in itertools.product(range(enc_length),repeat=2):\n i_type, i_index, i_token_index = loc_types[i]\n j_type, j_index, j_token_index = loc_types[j]\n\n relation_name = None\n for provider in self.relation_providers:\n relation_name = provider.compute_relation(\n desc, i_type, i_index, i_token_index, j_type, j_index, j_token_index)\n if relation_name is not None:\n relations[i, j] = 
self.relation_ids[relation_name]\n break\n if relation_name is None:\n raise ValueError('No relation provider worked')\n return relations\n\n\nclass NoOpUpdate:\n def __init__(self, device, hidden_size):\n pass\n\n def __call__(self, desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries):\n #return q_enc.transpose(0, 1), c_enc.transpose(0, 1), t_enc.transpose(0, 1)\n return q_enc, c_enc, t_enc\n\n" ]
[ [ "torch.LongTensor", "torch.nn.Dropout", "numpy.pad", "torch.cat", "torch.nn.LSTM", "numpy.cumsum", "numpy.stack", "torch.tensor", "torch.nn.Linear", "numpy.empty" ] ]
cwharris/custrings
[ "78b52db8a07163e170b6728217b2e1e85d6b9007" ]
[ "python/tests/test_strip.py" ]
[ "# Copyright (c) 2018-2019, NVIDIA CORPORATION.\n\nimport pandas as pd\nimport nvstrings\n\nfrom utils import assert_eq\n\n\ndef test_strip():\n s = [\" hello \", \" there \", \" world \", None, \" accénté \", \"\"]\n strs = nvstrings.to_device(s)\n pstrs = pd.Series(s)\n got = strs.strip()\n expected = pstrs.str.strip()\n assert_eq(got.to_host(), expected)\n\n got = strs.strip().strip('é')\n expected = pstrs.str.strip().str.strip('é')\n assert_eq(got.to_host(), expected)\n\n got = strs.strip(' e')\n expected = pstrs.str.strip(' e')\n assert_eq(got.to_host(), expected)\n\n\ndef test_lstrip():\n s = [\" hello \", \" there \", \" world \", None, \" accénté \", \"\"]\n strs = nvstrings.to_device(s)\n pstrs = pd.Series(s)\n got = strs.lstrip()\n expected = pstrs.str.lstrip()\n assert_eq(got.to_host(), expected)\n\n\ndef test_rstrip():\n s = [\" hello \", \" there \", \" world \", None, \" accénté \", \"\"]\n strs = nvstrings.to_device(s)\n pstrs = pd.Series(s)\n got = strs.rstrip()\n expected = pstrs.str.rstrip()\n assert_eq(got.to_host(), expected)\n" ]
[ [ "pandas.Series" ] ]
krai/ck-qaic
[ "c84964bb019fe1b12ba952fafd92274ecdc64380" ]
[ "package/model-qaic-calibrate/ssd-resnet34/modifications/aimet_mod.py" ]
[ "import types\nfrom typing import Tuple, List, Union, Dict\nimport copy\n\nimport numpy as np\nimport torch\n\nimport libpymo\nimport aimet_torch\nimport aimet_torch.onnx_utils\nfrom aimet_torch import utils\nimport aimet_torch.bias_correction\nfrom aimet_torch.qc_quantize_op import QcQuantizeWrapper, QcPostTrainingWrapper\nfrom aimet_common.defs import QuantScheme\n\nimport models.ssd_r34 as ssd_r34\nfrom modifications.ssd_r34_mod import Concat as ConcatMod\naimet_torch.onnx_utils.map_torch_types_to_onnx[ConcatMod] = 'Concat'\n\ndef my_compute_encoding_for_given_bitwidth(data: np.ndarray, bitwidth: int, quant_scheme: QuantScheme,\n is_symmetric: bool) -> Dict:\n \"\"\"\n Return encoding dictionary for given bitwidth\n :param data: Numpy data\n :param bitwidth: bitwidth (4-31) to use for quantizing data\n :param quant_scheme: Quantization scheme\n :param is_symmetric: True if symmetric encodings is used, False otherwise\n :return: Encoding Dictionary\n \"\"\"\n # Create Encodings Analyzer and collect statistical data to compute encodings\n # Since the data is numpy array and on CPU memory, useCuda is False\n encoding_analyzer = libpymo.EncodingAnalyzerForPython(quant_scheme)\n encoding_analyzer.updateStats(data, False)\n\n encoding, is_encoding_valid = encoding_analyzer.computeEncoding(bitwidth, is_symmetric, False, False)\n\n if is_encoding_valid:\n return {'min': encoding.min,\n 'max': encoding.max,\n 'scale': encoding.delta,\n 'offset': int(encoding.offset),\n 'bitwidth': encoding.bw,\n 'is_symmetric': str(is_symmetric)}\n\n return {}\n\ndef my_map_onnx_nodes_to_pytorch(torch_model: torch.nn.Module, dummy_input,\n onnx_ordered_list):\n torch_ordered_list = aimet_torch.utils.get_ordered_list_of_modules(torch_model, dummy_input)\n\n torch_index = 0\n onnx_index = 0\n\n num_onnx_nodes_to_map_to_same_torch_node = 0\n while torch_index < len(torch_ordered_list):\n # If few PyTorch ops are not mapped to ONNX ops\n if onnx_index >= len(onnx_ordered_list):\n aimet_torch.onnx_utils._logger.warning('All ONNX ops were exhausted but few PyTorch ops did not get mapped to a '\n 'corresponding ONNX op')\n break\n name, module = torch_ordered_list[torch_index]\n\n if isinstance(module, tuple(aimet_torch.onnx_utils.torch_types_to_ignore)):\n torch_index += 1\n continue\n if onnx_ordered_list[onnx_index].op_type in 'Concat' and len(onnx_ordered_list[onnx_index].input) < 6:\n pass \n elif onnx_ordered_list[onnx_index].op_type in aimet_torch.onnx_utils.map_torch_types_to_onnx[type(module)]:\n aimet_torch.onnx_utils._logger.debug('Found a match: %r -> %r', onnx_ordered_list[onnx_index].op_type, name)\n onnx_ordered_list[onnx_index].name = name\n\n if num_onnx_nodes_to_map_to_same_torch_node == 0:\n num_onnx_nodes_to_map_to_same_torch_node = aimet_torch.onnx_utils.OnnxSaver.get_num_onnx_nodes_to_map(module)\n\n num_onnx_nodes_to_map_to_same_torch_node = num_onnx_nodes_to_map_to_same_torch_node - 1\n if num_onnx_nodes_to_map_to_same_torch_node == 0:\n torch_index += 1\n\n onnx_index += 1\n\ndef my_update_param_encodings_dict_for_layer(self, layer: torch.nn.Module, layer_name: str,\n\t\t\t\t\t\t\t\t\t\t\tparam_encodings: Dict, valid_param_set: set):\n\t\"\"\"\n\t:param layer: layer as torch.nn.Module\n\t:param layer_name : Name of the layer\n\t:param param_encodings: dictionary of param encodings\n\t:param valid_param_set: a set of valid param input names in model\n\t\"\"\"\n\n\tdisabled_param_quantizers = []\n\tfor orig_param_name, param_quantizer in layer.param_quantizers.items():\n\t\tparam_name = 
layer_name + '.' + orig_param_name\n\t\tif param_quantizer.enabled:\n\t\t\tif param_name in valid_param_set:\n\t\t\t\tencoding = utils.create_encoding_dict(param_quantizer.encoding,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tparam_quantizer.use_symmetric_encodings)\n\t\t\t\tparam_encodings[param_name] = [encoding]\n\t\t\telse:\n\t\t\t\tlogger.error('Param tensor {%s} not found in valid param set', param_name)\n\t\telse:\n\t\t\tdisabled_param_quantizers.append(orig_param_name)\n\n\t# retrieve the appropriate param generator\n\tif isinstance(layer, QcQuantizeWrapper):\n\t\t# pylint: disable=protected-access\n\t\tnamed_parameters = layer._module_to_wrap.named_parameters()\n\telse:\n\t\tnamed_parameters = layer.named_parameters(recurse=False)\n\n\tfor name, param in named_parameters:\n\t\t# if the param quantizer was disabled generate encoding assuming bitwidth of 32\n\t\tif name in disabled_param_quantizers:\n\t\t\tparam_name = layer_name + '.' + name\n\t\t\tparam_quantizer = layer.param_quantizers[name]\n\t\t\tparam_data = param.cpu().detach().numpy()\n\t\t\tencoding = utils.compute_encoding_for_given_bitwidth(param_data, 31, param_quantizer.quant_scheme,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tparam_quantizer.use_symmetric_encodings)\n\t\t\tparam_encodings[param_name] = [encoding]\n\ndef my_correct_bias(model: torch.nn.Module, quant_params,\n num_quant_samples: int, data_loader, num_bias_correct_samples: int,\n conv_bn_dict = None,\n perform_only_empirical_bias_corr: bool = True,\n layers_to_ignore = None,\n quantizer_modifications=None):\n\n if layers_to_ignore is None:\n layers_to_ignore = []\n\n # Find batch size and shape of input tensor\n batch_size, input_shape = aimet_torch.utils.get_input_shape_batch_size(data_loader)\n\n # Rounding up number of samples to batch size\n n_batches_bias_correction = int(np.ceil(num_bias_correct_samples / batch_size))\n n_batches_quantization = int(np.ceil(num_quant_samples / batch_size))\n\n data_loader_n_samples_bias_corr = aimet_torch.utils.IterFirstX(data_loader, n_batches_bias_correction)\n data_loader_n_samples_quant = aimet_torch.utils.IterFirstX(data_loader, n_batches_quantization)\n\n # TODO: Remove wrapper function\n # Create a wrapping function for data loader for quantization\n def pass_data_through_model(model, early_stopping_iterations=None, use_cuda=False):\n # pylint: disable=unused-argument\n # forward pass for given number of batches for model\n for (images_in_one_batch, _) in data_loader_n_samples_quant:\n aimet_torch.bias_correction.forward_pass(model, images_in_one_batch)\n\n ordered_conv_linear_nodes = aimet_torch.utils.get_ordered_lists_of_conv_fc(model, input_shape)\n\n if conv_bn_dict is None:\n conv_bn_dict = aimet_torch.bias_correction.find_all_conv_bn_with_activation(model, input_shape)\n\n # Create a copy of the model as reference model\n model_copy = copy.deepcopy(model)\n\n # Add bias for all the layers whose bias is None\n for name, module in ordered_conv_linear_nodes:\n if module.bias is None:\n if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):\n output_size = module.out_channels\n elif isinstance(module, torch.nn.Linear):\n output_size = module.out_features\n module.bias = torch.nn.Parameter(torch.zeros(output_size))\n module.bias.data = module.bias.data.to(device=module.weight.device)\n\n # Quantize full model\n dummy_tensors = aimet_torch.utils.create_rand_tensors_given_shapes(input_shape)\n dummy_tensors = [tensor.to(aimet_torch.utils.get_device(model)) for tensor in dummy_tensors]\n q = 
aimet_torch.quantsim.QuantizationSimModel(model=model, quant_scheme=quant_params.quant_scheme,\n rounding_mode=quant_params.round_mode,\n default_output_bw=quant_params.act_bw,\n default_param_bw=quant_params.weight_bw,\n in_place=True,\n dummy_input=dummy_tensors, config_file=quant_params.config_file)\n\n # make sure model got updated in-place before we use it for bc updates\n assert(q.model is model)\n\n if quantizer_modifications is not None:\n quantizer_modifications(q)\n\n # updates to skip_output_activation and layers_to_ignore\n for name, module in model.named_modules():\n # Skip all layer's output quantization\n if isinstance(module, QcQuantizeWrapper):\n module.output_quantizers[0].enabled = False\n\n q.compute_encodings(pass_data_through_model, None)\n\n # For first conv layer, perform analytical bc if perform_only_empirical_bias_corr is set to False\n # and layer is not marked to be ignored during bc.\n if not perform_only_empirical_bias_corr:\n module_name, module = ordered_conv_linear_nodes[0]\n if module not in layers_to_ignore:\n aimet_torch.bias_correction.logger.info('Correcting layer %s using Analytical Bias Correction', module_name)\n quantize_layer = aimet_torch.utils.get_layer_by_name(model, module_name)\n aimet_torch.bias_correction.call_analytical_mo_correct_bias(quantize_layer, None, None)\n aimet_torch.bias_correction.logger.info('Corrected bias for the layer')\n ordered_conv_linear_nodes.pop(0)\n\n for module_name, module in ordered_conv_linear_nodes:\n # Ignore all layers which are skipped by user\n if module in layers_to_ignore or module_name in layers_to_ignore:\n continue\n else:\n # make sure module is in the model used by qsim.\n assert(module in list(q.model.modules()))\n # Analytical Bias Correction is only done for Conv layers\n reference_layer = aimet_torch.utils.get_layer_by_name(model_copy, module_name)\n quantize_layer = aimet_torch.utils.get_layer_by_name(model, module_name)\n\n if module in conv_bn_dict.keys():\n\n bn_layer_info = conv_bn_dict[module]\n\n if perform_only_empirical_bias_corr or bn_layer_info is None or bn_layer_info.input_bn is None:\n aimet_torch.bias_correction.logger.info('Correcting layer %s using Empirical Bias Correction', module_name)\n bias_correction = libpymo.BiasCorrection()\n\n # Get output from quantized model and reference model\n\n for images_in_one_batch, _ in data_loader_n_samples_bias_corr:\n reference_output_batch = aimet_torch.bias_correction.get_output_data(reference_layer, model_copy, images_in_one_batch)\n quantized_model_output_batch = aimet_torch.bias_correction.get_output_data(quantize_layer, model, images_in_one_batch)\n\n if isinstance(reference_layer, torch.nn.Linear):\n extended_shape = np.concatenate((reference_output_batch.shape, np.array([1, 1])))\n reference_output_batch = reference_output_batch.reshape(extended_shape)\n quantized_model_output_batch = quantized_model_output_batch.reshape(extended_shape)\n\n bias_correction.storePreActivationOutput(reference_output_batch)\n bias_correction.storeQuantizedPreActivationOutput(quantized_model_output_batch)\n\n aimet_torch.bias_correction.call_empirical_mo_correct_bias(module, bias_correction)\n\n else:\n aimet_torch.bias_correction.logger.info('Correcting layer %s using Analytical Bias Correction', module_name)\n aimet_torch.bias_correction.call_analytical_mo_correct_bias(quantize_layer, bn_layer_info.input_bn,\n bn_layer_info.in_activation_type)\n\n aimet_torch.bias_correction.logger.info('Corrected bias for the layer')\n\n 
aimet_torch.save_utils.SaveUtils.remove_quantization_wrappers(model)\n\n aimet_torch.bias_correction.logger.info('Completed bias correction')\n\ndef quantizer_modifications(quantizer):\n from modifications import ssd_r34_mod\n quantizer._update_param_encodings_dict_for_layer = types.MethodType(my_update_param_encodings_dict_for_layer, quantizer)\n\n asymmetric_weight_quantization = [\n 'model.layer2.0.5.conv1',\n 'model.layer2.0.5.conv2'\n ]\n asymmetric_actvn_quantization = [\n 'catModule1',\n 'catModule2',\n 'conf.0',\n 'conf.1',\n 'conf.3',\n 'model.layer2.0.4.relu2',\n 'model.layer2.0.5.conv1',\n 'model.layer2.0.5.conv2'\n ]\n\n for name,module in quantizer.model.named_modules():\n if name in asymmetric_weight_quantization:\n module.param_quantizers['weight'].use_symmetric_encodings = False\n if name in asymmetric_actvn_quantization:\n module.output_quantizer.use_symmetric_encodings = False\n\nutils.compute_encoding_for_given_bitwidth = my_compute_encoding_for_given_bitwidth\naimet_torch.onnx_utils.OnnxSaver.map_onnx_nodes_to_pytorch = my_map_onnx_nodes_to_pytorch\naimet_torch.bias_correction.correct_bias = my_correct_bias\n" ]
[ [ "numpy.ceil", "numpy.array", "torch.zeros" ] ]
monodera/seir_model
[ "1a5a9880b2124f87280af8998a3ebc43c2b07665" ]
[ "main.py" ]
[ "#!/usr/bin/env python\n\n# %%\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Slider, Range1d\nfrom bokeh.plotting import (\n ColumnDataSource,\n figure,\n output_file,\n show,\n save,\n curdoc,\n)\nfrom bokeh.io.doc import set_curdoc\n\n# %%\ndef seir_ode(t, y, beta, gamma, sigma, mu, nu):\n # y[0] : susceptible (S)\n # y[1] : exposed (E)\n # y[2] : infected (I)\n # y[3] : resistant (R)\n\n ntot = y[0] + y[1] + y[2] + y[3]\n array_return = [\n mu * (ntot - y[0]) - beta * y[0] * y[2] / ntot - nu * y[0],\n beta * y[0] * y[2] / ntot - (mu + sigma) * y[1],\n sigma * y[1] - (mu + gamma) * y[2],\n gamma * y[2] - mu * y[3] + nu * y[0],\n ]\n return array_return\n\n\n# %%\ndef solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span):\n\n n_eval = 101\n\n t_eval = np.linspace(t_span[0], t_span[1], n_eval)\n\n sol = solve_ivp(\n seir_ode, t_span, y0, t_eval=t_eval, args=(beta, gamma, sigma, mu, nu)\n )\n\n return sol\n\n\ndef plot_with_bokeh():\n\n beta = 0.8\n gamma = 0.1\n sigma = 0.5\n mu = 0.0\n nu = 0.0\n y0 = [10, 1, 0, 0]\n t_span = [0, 30] # days\n\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n\n x = sol.t\n y_s, y_e, y_i, y_r = sol.y\n\n source = ColumnDataSource(data=dict(x=x, y_s=y_s, y_e=y_e, y_i=y_i, y_r=y_r,))\n\n p = figure(\n plot_width=800, plot_height=600, x_axis_label=\"Days\", y_axis_label=\"Population\",\n )\n\n p.line(\n \"x\",\n \"y_s\",\n source=source,\n line_width=3,\n color=\"orange\",\n legend_label=\"Susceptible\",\n )\n p.line(\n \"x\",\n \"y_e\",\n source=source,\n line_width=3,\n color=\"dodgerblue\",\n legend_label=\"Exposed\",\n )\n p.line(\n \"x\",\n \"y_i\",\n source=source,\n line_width=3,\n color=\"orangered\",\n legend_label=\"Infected\",\n )\n p.line(\n \"x\",\n \"y_r\",\n source=source,\n line_width=3,\n color=\"seagreen\",\n legend_label=\"Resistant\",\n )\n\n slider_beta = Slider(start=0.0, end=1, value=0.8, step=0.1, title=\"\\u03B2\",)\n slider_gamma = Slider(start=0.0, end=1, value=0.1, step=0.1, title=\"\\u03B3\")\n slider_sigma = Slider(start=0.0, end=1, value=0.5, step=0.1, title=\"\\u03C3\")\n slider_mu = Slider(start=0.0, end=1, value=0.0, step=0.1, title=\"\\u03BC\")\n slider_nu = Slider(start=0.0, end=1, value=0.0, step=0.1, title=\"\\u03BD\")\n\n slider_s = Slider(start=0, end=100, value=10, step=1, title=\"N(Susceptible)\")\n slider_e = Slider(start=0, end=100, value=1, step=1, title=\"N(Exposed)\")\n slider_i = Slider(start=0, end=100, value=0, step=1, title=\"N(Infected)\")\n slider_r = Slider(start=0, end=100, value=0, step=1, title=\"N(Recovered)\")\n slider_t = Slider(start=0, end=100, value=30, step=1, title=\"Duration (days)\")\n\n def callback_beta(attr, old, new):\n sol = solve_seir_model(new, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_gamma(attr, old, new):\n sol = solve_seir_model(beta, new, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_sigma(attr, old, new):\n sol = solve_seir_model(beta, gamma, new, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def 
callback_mu(attr, old, new):\n sol = solve_seir_model(beta, gamma, sigma, new, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_nu(attr, old, new):\n sol = solve_seir_model(beta, gamma, sigma, mu, new, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_s(attr, old, new):\n y0[0] = new\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_e(attr, old, new):\n y0[1] = new\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_i(attr, old, new):\n y0[2] = new\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_r(attr, old, new):\n y0[3] = new\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n def callback_t(attr, old, new):\n t_span[1] = new\n sol = solve_seir_model(beta, gamma, sigma, mu, nu, y0, t_span)\n source.data[\"x\"] = sol.t\n source.data[\"y_s\"] = sol.y[0]\n source.data[\"y_e\"] = sol.y[1]\n source.data[\"y_i\"] = sol.y[2]\n source.data[\"y_r\"] = sol.y[3]\n\n slider_beta.on_change(\"value\", callback_beta)\n slider_gamma.on_change(\"value\", callback_gamma)\n slider_sigma.on_change(\"value\", callback_sigma)\n slider_mu.on_change(\"value\", callback_mu)\n slider_nu.on_change(\"value\", callback_nu)\n\n slider_s.on_change(\"value\", callback_s)\n slider_e.on_change(\"value\", callback_e)\n slider_i.on_change(\"value\", callback_i)\n slider_r.on_change(\"value\", callback_r)\n\n slider_t.on_change(\"value\", callback_t)\n\n # draw_plot()\n\n sliders_params = column(\n slider_beta, slider_gamma, slider_sigma, slider_mu, slider_nu\n )\n sliders_inits = column(slider_s, slider_e, slider_i, slider_r, slider_t)\n layout = column(p, row(sliders_params, sliders_inits),)\n\n curdoc().add_root(layout)\n\n\nplot_with_bokeh()\n" ]
[ [ "scipy.integrate.solve_ivp", "numpy.linspace" ] ]
itouchz/Conversational-AutoML
[ "01b7f8428d517a6d2b91f2a72e672c1b3627a992" ]
[ "server/AutoML.py" ]
[ "from flask import Flask, request, send_from_directory\nfrom flask_cors import CORS\nfrom tensorflow.keras.models import load_model\n\nimport tensorflow as tf\nimport autokeras as ak\nimport numpy as np\n\nimport dl_model as dl\nimport ml_model as ml\n\nimport os\nimport joblib\n\napp = Flask(__name__)\nCORS(app)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return 'Conversational AutoML Server~!'\n\[email protected]('/img/<path:filename>', methods=['GET', 'POST'])\ndef img_download(filename):\n uploads = os.path.join(app.root_path, 'upload/')\n return send_from_directory(directory=uploads, filename=filename)\n\[email protected]('/download/<path:filename>', methods=['GET', 'POST'])\ndef download(filename):\n uploads = os.path.join(app.root_path, 'models/')\n return send_from_directory(directory=uploads, filename=filename)\n\n\[email protected]('/upload', methods=['POST'])\ndef upload():\n file = request.files['file']\n file.save('./upload/' + file.filename)\n results = rb.upload_success(request.form['current_state'], file.filename)\n return {'sender': 'Bot', 'text': 'Successfully uploaded! <br/ >' + results[0], 'current_state': results[1], 'user_slot': results[2]}\n\n\[email protected]('/image/get_prediction', methods=['POST'])\ndef get_image_prediction():\n file = request.files['file']\n file.save('./upload/' + file.filename)\n\n model_name = request.form['model']\n label_name = request.form['target'].split(',')\n [task, _, method, _] = model_name.split('_')\n text = ''\n\n img = tf.keras.preprocessing.image.load_img(\"./upload/\" + file.filename, target_size=(32, 32))\n img_array = tf.keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0)\n\n print(label_name)\n if method == 'dl':\n if task == 'cls':\n if len(label_name) == 2:\n model = load_model(\n model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = np.floor(model.predict([img_array]))\n text += \"It's a \" + label_name[pred[0]] + \".\"\n else:\n model = load_model(\n model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = np.argmax(model.predict([img_array]), axis=1)\n text += \"It's a \" + label_name[pred[0]] + \".\"\n else:\n model = load_model(model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = model.predict([img_array])\n text += \"The predicted \" + label_name + \" is \" + pred + \".\"\n else:\n img_array = img_array.numpy().reshape(1,-1)\n if task == 'cls':\n model = joblib.load(model_name)\n pred = model.predict([img_array])\n text += \"It's a \" + label_name[pred[0]] + \".\"\n else:\n model = joblib.load(model_name)\n pred = model.predict([img_array])\n text += \"The predicted \" + label_name + \" is \" + pred + \".\"\n\n response = {\n 'sender': 'Bot',\n 'text': text, \n 'img_name': file.filename\n }\n\n return response\n\n\[email protected]('/text/get_prediction', methods=['POST'])\ndef get_text_prediction():\n model_name = request.get_json(force=True)['model']\n label_name = request.get_json(force=True)['target']\n input_text = request.get_json(force=True)['input_text']\n [task, _, method, _] = model_name.split('_')\n text = ''\n\n print(label_name)\n if method == 'dl':\n if task == 'cls':\n if len(label_name) == 2:\n model = load_model(\n model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = np.round(model.predict([input_text])).astype(int)\n print(pred)\n text += \"It's \" + label_name[pred[0][0]] + \".\"\n else:\n model = load_model(\n model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = np.argmax(model.predict([input_text]), axis=1).astype(int)\n text += \"It's \" 
+ label_name[pred[0][0]] + \".\"\n else:\n model = load_model(model_name, custom_objects=ak.CUSTOM_OBJECTS)\n pred = model.predict([input_text])\n text += \"The predicted \" + label_name + \" is \" + pred + \".\"\n else:\n if task == 'cls':\n model = joblib.load(model_name)\n pred = model.predict([input_text])\n text += \"It's \" + label_name[pred[0]] + \".\"\n else:\n model = joblib.load(model_name)\n pred = model.predict([input_text])\n text += \"The predicted \" + label_name + \" is \" + pred + \".\"\n\n response = {\n 'sender': 'Bot',\n 'text': text\n }\n\n return response\n\n\[email protected]('/get_model', methods=['POST'])\ndef get_model():\n data = request.get_json(force=True)\n\n if data['user_slot']['method'] == 'dl':\n results = dl.get_model(data['user_slot'])\n else:\n results = ml.get_model(data['user_slot'])\n\n response = {\n 'model': results['model'],\n 'score': results['score'],\n 'metric': results['metric'],\n 'summary': results['summary']\n }\n\n return response\n" ]
[ [ "tensorflow.expand_dims", "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.keras.preprocessing.image.load_img" ] ]
gomesfernanda/lxmls_lab
[ "74b60b9e79aaa2994aee9428b623c04e93807bda" ]
[ "lxmls/parsing/dependency_features.py" ]
[ "import sys\nimport numpy as np\nfrom scipy.sparse import lil_matrix\nfrom lxmls.parsing.dependency_reader import *\n\n\nclass DependencyFeatures:\n \"\"\"\n Dependency features class\n \"\"\"\n\n def __init__(self, use_lexical=False, use_distance=False, use_contextual=False):\n self.feat_dict = {}\n self.n_feats = 0\n self.use_lexical = use_lexical\n self.use_distance = use_distance\n self.use_contextual = use_contextual\n\n def create_dictionary(self, instances):\n \"\"\"Creates dictionary of features (note: only uses supported features)\"\"\"\n self.feat_dict = {}\n self.n_feats = 0\n for instance in instances:\n nw = np.size(instance.words) - 1\n heads = instance.heads\n for m in range(1, nw+1):\n h = heads[m]\n self.create_arc_features(instance, h, m, True)\n\n print(\"Number of features: {0}\".format(self.n_feats))\n\n def create_features(self, instance):\n \"\"\"Creates arc features from an instance.\"\"\"\n nw = np.size(instance.words) - 1\n feats = np.empty((nw+1, nw+1), dtype=object)\n for h in range(0, nw+1):\n for m in range(1, nw+1):\n if h == m:\n feats[h][m] = []\n continue\n feats[h][m] = self.create_arc_features(instance, h, m)\n\n return feats\n\n def create_arc_features(self, instance, h, m, add=False):\n \"\"\"Creates features for arc h-->m.\"\"\"\n nw = np.size(instance.words)\n k = 0\n ff = []\n if h < m:\n att_dir = 1 # Right attachment\n dist = m - h # Distance\n else:\n att_dir = 0 # Left attachment\n dist = h - m # Distance\n if dist > 10:\n dist = 10\n elif dist > 5:\n dist = 5\n\n if h == 0:\n hpp = \"__START__\"\n else:\n hpp = instance.pos[h-1]\n if h == nw-1:\n hpn = \"__END__\"\n else:\n hpn = instance.pos[h+1]\n if m == 0:\n mpp = \"__START__\"\n else:\n mpp = instance.pos[m-1]\n if m == nw-1:\n mpn = \"__END__\"\n else:\n mpn = instance.pos[m+1]\n\n # Head pos, modifier pos\n f = self.lookup_fid(\"{0}_{1}_{2}\".format(k, instance.pos[h], instance.pos[m]), add)\n ff.append(f)\n k += 1\n # Head pos\n f = self.lookup_fid(\"{0}_{1}\".format(k, instance.pos[h]), add)\n ff.append(f)\n k += 1\n\n if self.use_lexical:\n # Head word+pos, modifier word+pos\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}\".format(\n k, instance.words[h],\n instance.pos[h],\n instance.words[m],\n instance.pos[m]),\n add)\n ff.append(f)\n k += 1\n # Head word+pos, modifier pos\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}\".format(k, instance.words[h], instance.pos[h], instance.pos[m]), add)\n ff.append(f)\n k += 1\n # Head pos, modifier word\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}\".format(k, instance.pos[h], instance.words[m], instance.pos[m]), add)\n ff.append(f)\n k += 1\n # Head word+pos\n f = self.lookup_fid(\"{0}_{1}_{2}\".format(k, instance.words[h], instance.pos[h]), add)\n ff.append(f)\n k += 1\n\n if self.use_distance:\n # Direction of attachment\n f = self.lookup_fid(\"{0}_{1}\".format(k, att_dir), add)\n ff.append(f)\n k += 1\n\n # Distance\n f = self.lookup_fid(\"{0}_{1}\".format(k, dist), add)\n ff.append(f)\n k += 1\n\n if self.use_contextual:\n # Contextual features\n # Head pos+posl, modifier pos, dir\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}_{4}\".format(k, instance.pos[h], hpp, instance.pos[m], att_dir), add)\n ff.append(f)\n k += 1\n # Head pos+posr, modifier pos, dir\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}_{4}\".format(k, instance.pos[h], hpn, instance.pos[m], att_dir), add)\n ff.append(f)\n k += 1\n # Head pos+posl+posr, modifier pos, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n hpp,\n hpn,\n instance.pos[m],\n att_dir),\n 
add)\n ff.append(f)\n k += 1\n # Head pos, modifier pos+posl, dir\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}_{4}\".format(k, instance.pos[h], instance.pos[m], mpp, att_dir), add)\n ff.append(f)\n k += 1\n # Head pos, modifier pos+posr, dir\n f = self.lookup_fid(\"{0}_{1}_{2}_{3}_{4}\".format(k, instance.pos[h], instance.pos[m], mpn, att_dir), add)\n ff.append(f)\n k += 1\n # Head pos, modifier pos+posl+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n instance.pos[m],\n mpp,\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl, modifier pos+posl, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n hpp,\n instance.pos[m],\n mpp,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl, modifier pos+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n hpp,\n instance.pos[m],\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posr, modifier pos+posl, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n hpn,\n instance.pos[m],\n mpp,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posr, modifier pos+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}\".format(\n k,\n instance.pos[h],\n hpn,\n instance.pos[m],\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl, modifier pos+posl+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}_{6}\".format(\n k,\n instance.pos[h],\n hpp,\n instance.pos[m],\n mpp,\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posr, modifier pos+posl+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}_{6}\".format(\n k,\n instance.pos[h],\n hpn,\n instance.pos[m],\n mpp,\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl+posr, modifier pos+posl, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}_{6}\".format(\n k,\n instance.pos[h],\n hpp,\n hpn,\n instance.pos[m],\n mpp,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl+posr, modifier pos+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}_{6}\".format(\n k,\n instance.pos[h],\n hpp,\n hpn,\n instance.pos[m],\n mpn,\n att_dir),\n add)\n ff.append(f)\n k += 1\n # Head pos+posl+posr, modifier pos+posl+posr, dir\n f = self.lookup_fid(\n \"{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}\".format(\n k, instance.pos[h],\n hpp, hpn, instance.pos[m],\n mpp, mpn, att_dir),\n add)\n ff.append(f)\n k += 1\n\n return ff\n\n def lookup_fid(self, fname, add=False):\n \"\"\"Looks up dictionary for feature ID.\"\"\"\n if fname not in self.feat_dict:\n if add:\n fid = self.n_feats\n self.n_feats += 1\n self.feat_dict[fname] = fid\n return fid\n else:\n return -1\n else:\n return self.feat_dict[fname]\n\n def compute_scores(self, feats, weights):\n \"\"\"Compute scores by taking the dot product between the feature and weight vector.\"\"\"\n nw = np.size(feats, 0) - 1\n scores = np.zeros((nw+1, nw+1))\n for h in range(nw+1):\n for m in range(nw+1):\n if feats[h][m] is None:\n continue\n for f in feats[h][m]:\n if f < 0:\n continue\n scores[h][m] += weights[f]\n return scores\n" ]
[ [ "numpy.size", "numpy.zeros", "numpy.empty" ] ]
Chucooleg/vnla
[ "b9c1367b263f00a38828ff24cefc8becc149be7a" ]
[ "code/tasks/VNLA/history_buffer.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport pickle\nfrom collections import defaultdict\nimport numpy as np\nimport os\nimport torch\n\n\ndef nested_defaultdict():\n '''Use this rather than lambda to allow defaultdict objects to be pickled'''\n return defaultdict(list)\n\n\nclass HistoryBuffer(object):\n '''\n History Buffer for training value estimation agents\n '''\n\n def __init__(self, hparams, device):\n\n self.device = device\n \n # Stored data format\n # (instr_id, global_iter_idx) : {viewpointIndex:[], scan:'', instruction: [], feature: [], action: [], q_values_target: [], bootstrap_mask: [], ... etc}\n self._indexed_data = defaultdict(nested_defaultdict)\n\n # Use this to remove earliest experience when buffer is full\n self._earliest_iter_idx = 0\n\n # Map global_iter_idx to set of instr_id\n self._iter_instr_map = defaultdict(set)\n\n # Set of (instr_id, global_iter_idx) keys for sampling minibatch\n self._instr_iter_keys = set()\n\n # max storage limit -- number of examples\n self._curr_buffer_size = 0\n self.max_buffer_size = hparams.max_buffer_size\n\n # bernoulli mask prob\n self.bootstrap = hparams.bootstrap\n if self.bootstrap:\n self.n_ensemble = hparams.n_ensemble\n self.bernoulli_probability = hparams.bernoulli_probability\n\n def __len__(self):\n return len(self._indexed_data)\n\n def curr_buffer_size(self):\n return self._curr_buffer_size\n\n def __getitem__(self, instr_iter_key):\n '''instr_iter_key should be (instr_id, global_iter_idx)'''\n return self._indexed_data[instr_iter_key]\n\n def save_buffer(self, dir_path):\n '''save history buffer files to a directory)'''\n \n print ('saving history buffer to {}'.format(dir_path))\n \n info_dict = {}\n info_dict['_earliest_iter_idx'] = self._earliest_iter_idx\n info_dict['_iter_instr_map'] = self._iter_instr_map\n info_dict['_instr_iter_keys'] = self._instr_iter_keys\n info_dict['_curr_buffer_size'] = self._curr_buffer_size\n info_dict['max_buffer_size'] = self.max_buffer_size\n \n data_dict = {}\n for key in self._instr_iter_keys:\n data_dict[key] = {\n 'viewpointIndex' : self._indexed_data[key]['viewpointIndex'],\n 'scan' : self._indexed_data[key]['scan'],\n 'instruction' : self._indexed_data[key]['instruction'],\n 'feature' : self._indexed_data[key]['feature'],\n 'action' : self._indexed_data[key]['action'],\n 'view_index_mask_indices' : self._indexed_data[key]['view_index_mask_indices'],\n 'viewix_actions_map' : self._indexed_data[key]['viewix_actions_map'],\n 'q_values_target' : self._indexed_data[key]['q_values_target'],\n 'bootstrap_mask' : self._indexed_data[key]['bootstrap_mask']\n }\n\n info_file_path = os.path.join(dir_path, 'history_buffer_info.pickle')\n data_file_path = os.path.join(dir_path, 'history_buffer_data.pickle') \n with open(info_file_path, 'wb') as handle:\n pickle.dump(info_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(data_file_path, 'wb') as handle:\n pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\t\n \n def load_buffer(self, dir_path):\n '''load history buffer files to object'''\n \n print ('loading history buffer from {}'.format(dir_path))\n \n info_file_path = os.path.join(dir_path, 'history_buffer_info.pickle')\n data_file_path = os.path.join(dir_path, 'history_buffer_data.pickle') \n with open(info_file_path, 'rb') as handle:\n info_dict = pickle.load(handle)\n with open(data_file_path, 'rb') as handle:\n data_dict = pickle.load(handle)\n \n self._earliest_iter_idx = info_dict['_earliest_iter_idx']\n 
self._iter_instr_map = info_dict['_iter_instr_map']\n self._instr_iter_keys = info_dict['_instr_iter_keys']\n self._curr_buffer_size = info_dict['_curr_buffer_size']\n self.max_buffer_size = info_dict['max_buffer_size']\n \n for key in self._instr_iter_keys: \n self._indexed_data[key]['instruction'] = data_dict[key]['instruction']\n self._indexed_data[key]['feature'] = data_dict[key]['feature']\n self._indexed_data[key]['action'] = data_dict[key]['action']\n self._indexed_data[key]['view_index_mask_indices'] = data_dict[key]['view_index_mask_indices']\n self._indexed_data[key]['viewix_actions_map'] = data_dict[key]['viewix_actions_map']\n self._indexed_data[key]['q_values_target'] = data_dict[key]['q_values_target']\n self._indexed_data[key]['bootstrap_mask'] = data_dict[key]['bootstrap_mask']\n\n def remove_earliest_experience(self, batch_size):\n '''Remove earliest iterations of experiences from buffer, until we have room to add the next batch.'''\n while self._curr_buffer_size + batch_size >= self.max_buffer_size:\n print(\"removing earliest experience\")\n remove_size = 0\n for instr_id in self._iter_instr_map[self._earliest_iter_idx]:\n remove_size += len(self._indexed_data[(instr_id, self._earliest_iter_idx)]['action'])\n del self._indexed_data[(instr_id, self._earliest_iter_idx)]\n self._instr_iter_keys.remove((instr_id, self._earliest_iter_idx))\n self._curr_buffer_size -= remove_size\n self._earliest_iter_idx += 1\n print(\"current buffer size = {}\".format(self._curr_buffer_size))\n print(\"current earliest iter idx = {}\".format(self._earliest_iter_idx))\n\n def is_full(self):\n '''Check if the buffer has stored up to its storage limit'''\n return self._curr_buffer_size >= self.max_buffer_size\n\n def add_experience(self, experience_batch):\n '''\n Add a single batch of experience to the buffer.\n Called per time step during rollout().\n '''\n\n batch_size = len(experience_batch)\n if self._curr_buffer_size + batch_size >= self.max_buffer_size:\n self.remove_earliest_experience(batch_size)\n\n assert not self.is_full()\n\n # Write experience to buffer\n for experience in experience_batch:\n key = (experience['instr_id'], experience['global_iter_idx'])\n\n # Append image feature related indices\n self._indexed_data[key]['viewpointIndex'].append(experience['viewpointIndex'])\n self._indexed_data[key]['scan'] = experience['scan']\n\n # Append list of token IDs\n self._indexed_data[key]['instruction'].append(experience['instruction'])\n\n # Append tensors\n self._indexed_data[key]['feature'].append(experience['feature'])\n self._indexed_data[key]['action'].append(experience['action'])\n \n # Append pano indexing\n # np array\n self._indexed_data[key]['view_index_mask_indices'].append(experience['view_index_mask_indices'])\n # tensor\n self._indexed_data[key]['viewix_actions_map'].append(experience['viewix_actions_map'])\n \n # # np array\n # self._indexed_data[key]['end_target_indices'].append(experience['end_target_indices'])\n\n # Append target\n self._indexed_data[key]['q_values_target'].append(experience['q_values_target'])\n\n # Append bootstrap mask\n bootstrap_mask = torch.bernoulli(torch.ones(self.n_ensemble, dtype=torch.float, device=self.device) * self.bernoulli_probability) if self.bootstrap else None\n self._indexed_data[key]['bootstrap_mask'].append(bootstrap_mask)\n \n # Build global_iter_idx to instr_id map\n self._iter_instr_map[experience['global_iter_idx']].add(experience['instr_id'])\n # Build set of keys for sampling\n self._instr_iter_keys.add(key)\n\n 
self._curr_buffer_size += batch_size\n\n def sample_minibatch(self, batch_size):\n '''Sample a training batch for training'''\n\n assert self._curr_buffer_size >= batch_size, \\\n '''Buffer doesn't have enough history to sample a batch'''\n\n # Sample (global_idx, instr_id) key pairs\n # shape (number of unique key pairs, 2)\n instr_key_pairs_list = sorted(list(self._instr_iter_keys))\n # shape (batch_size, 2)\n sampling_indices = np.random.randint(len(instr_key_pairs_list), size=batch_size)\n sampled_iter_instr_key_pair = [instr_key_pairs_list[idx] for idx in sampling_indices]\n\n # Debug\n # print ('training sampled training keys length', len(sampling_indices))\n # print ('training sampled instr_key_pairs_list', sorted(sampling_indices))\n\n # Further sample the time step\n traj_lens = [len(self._indexed_data[key]['action']) for key in sampled_iter_instr_key_pair]\n\n # Debug\n if 1 in traj_lens or 0 in traj_lens:\n raise ValueError('Should not sample time step 0 or 1')\n\n # Sample timesteps\n # do not sample from t=0 at <start> state\n sampled_timesteps = [np.random.randint(low=1, high=t_len) for t_len in traj_lens]\n\n # # Vectorized way to choose nearly random timesteps with upperbounds\n # random_ints = np.random.randint(1000000000, size=len(traj_lens))\n # sampled_timesteps = random_ints % traj_lens\n\n assert len(sampled_iter_instr_key_pair) == len(sampled_timesteps) == batch_size\n return sampled_iter_instr_key_pair, sampled_timesteps\n" ]
[ [ "torch.ones", "numpy.random.randint" ] ]
tobifinn/py_bacy
[ "f550876fe0303eb1711866268871f6fd478ef1c1" ]
[ "py_bacy/tasks/pytassim/cosmo.py" ]
[ "#!/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Created on 14.01.21\n#\n# Created for py_bacy\n#\n# @author: Tobias Sebastian Finn, [email protected]\n#\n# Copyright (C) {2021} {Tobias Sebastian Finn}\n\n\n# System modules\nfrom typing import Dict, Any, List, Tuple\nimport os.path\nimport glob\n\n# External modules\nimport prefect\nfrom prefect import task\n\nimport xarray as xr\nimport numpy as np\nfrom distributed import Client\nimport pandas as pd\n\nfrom pytassim.model.terrsysmp.cosmo import preprocess_cosmo, postprocess_cosmo\n\n# Internal modules\nfrom py_bacy.tasks.cosmo import get_cos_bg_fname\nfrom py_bacy.tasks.io import load_ens_data, write_ens_data\nfrom py_bacy.tasks.system import symlink\nfrom py_bacy.tasks.xarray import constrain_var\n\n\nEARTH_RADIUS = 6371000\nDEG_TO_M = 2 * np.pi / 360 * EARTH_RADIUS\n\nANA_FNAME = 'laf%Y%m%d%H%M%S.nc'\n\n\n__all__ = [\n 'link_background',\n 'load_background',\n 'post_process_analysis',\n 'write_analysis',\n 'link_output'\n]\n\n\ndef distance_func(x, y):\n diff_obs_cos_deg = y.values[:, 1:-1] - x[1:-1]\n diff_obs_cos_m = diff_obs_cos_deg * DEG_TO_M\n dist_obs_cos_2d = np.sqrt(np.sum(diff_obs_cos_m**2, axis=-1))\n\n cos_press = press_int(x[-1])\n obs_press = press_int(y.values[:, -1])\n obs_lnp = np.log(obs_press)\n cos_lnp = np.log(cos_press)\n dist_obs_cos_vert = np.abs(cos_lnp - obs_lnp)\n return dist_obs_cos_2d, dist_obs_cos_vert\n\n\ndef press_int(level_height):\n return 1013.25 * np.power(1 - (0.0065 * level_height / 288.15), 5.255)\n\n\n@task\ndef link_background(\n parent_model_output: str,\n input_folder: str,\n config: Dict[str, Any],\n cycle_config: Dict[str, Any],\n analysis_time: pd.Timestamp\n) -> str:\n logger = prefect.context.get('logger')\n bg_fname = get_cos_bg_fname.run(\n fname_template=config['bg_files'], curr_time=analysis_time\n )\n source_path = os.path.join(parent_model_output, bg_fname)\n source_files_found = list(sorted(glob.glob(source_path)))\n logger.debug('Found {0} as background files'.format(source_files_found))\n source_file = source_files_found[0]\n target_file = os.path.join(input_folder, bg_fname)\n target_file = symlink.run(source=source_file, target=target_file)\n return target_file\n\n\n@task\ndef load_background(\n bg_files: List[str],\n analysis_time: pd.Timestamp,\n assim_config: Dict[str, Any],\n cycle_config: Dict[str, Any],\n ens_members: List[int],\n client: Client,\n) -> Tuple[xr.Dataset, xr.DataArray]:\n ds_cosmo = load_ens_data.run(\n file_paths=bg_files, client=client\n )\n ds_cosmo['ensemble'] = ens_members\n background = preprocess_cosmo(ds_cosmo, assim_config['assim_vars'])\n background = background.load()\n return ds_cosmo, background\n\n\n@task\ndef post_process_analysis(\n analysis: xr.DataArray,\n model_dataset: xr.Dataset\n) -> xr.Dataset:\n analysis = analysis.compute()\n analysis = postprocess_cosmo(analysis, model_dataset)\n analysis = constrain_var.run(analysis, 'QV', lower_bound=0)\n analysis = constrain_var.run(analysis, 'QC', lower_bound=0)\n analysis = constrain_var.run(analysis, 'QI', lower_bound=0)\n analysis = constrain_var.run(analysis, 'QR', lower_bound=0)\n analysis = constrain_var.run(analysis, 'QS', lower_bound=0)\n analysis = constrain_var.run(analysis, 'QV_S', lower_bound=0)\n analysis = constrain_var.run(\n analysis, 'T', lower_bound=173.15, upper_bound=350\n )\n analysis = constrain_var.run(\n analysis, 'T_S', lower_bound=240, upper_bound=350\n )\n return analysis\n\n\n@task\ndef write_analysis(\n analysis: xr.Dataset,\n background_files: List[str],\n 
output_dirs: List[str],\n analysis_time: pd.Timestamp,\n assim_config: Dict[str, Any],\n cycle_config: Dict[str, Any],\n client: Client,\n) -> Tuple[List[str], xr.Dataset]:\n analysis_fname = analysis_time.strftime(ANA_FNAME)\n analysis_files = [\n os.path.join(output_dir, analysis_fname) for output_dir in output_dirs\n ]\n _ = write_ens_data.run(\n dataset_to_write=analysis,\n source_paths=background_files,\n target_paths=analysis_files,\n assim_vars=assim_config['assim_vars'],\n client=client\n )\n return analysis_files, analysis\n\n\n@task\ndef link_output(\n background_file: str,\n output_dir: str,\n analysis_time: pd.Timestamp\n) -> str:\n if not os.path.isfile(background_file):\n raise OSError('{0:s} does not exist!'.format(background_file))\n\n output_fname = analysis_time.strftime(ANA_FNAME)\n output_file = os.path.join(output_dir, output_fname)\n output_file = symlink.run(\n source=background_file, target=output_file\n )\n return output_file\n" ]
[ [ "numpy.log", "numpy.sum", "numpy.abs", "numpy.power" ] ]