repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
Jintao-Huang/yolov5_PyTorch |
[
"8339b45ad420ee632e435e9f34c9bbb80a29691e"
] |
[
"utils/detection/trainer.py"
] |
[
"# Author: Jintao Huang\n# Time: 2020-6-6\n\nfrom torch.utils.data import DataLoader\nimport torch\n\n\nclass RuntimeErrorHandler:\n def __init__(self, ignore_num):\n self.ignore_num_ori = self.ignore_num = ignore_num\n\n def error(self, e):\n if self.ignore_num > 0:\n print(e, flush=True)\n self.ignore_num -= 1\n else:\n raise e\n\n def init(self):\n self.ignore_num = self.ignore_num_ori\n\n\nclass Trainer:\n def __init__(self, model, optim, train_dataset, batch_size, device,\n lr_scheduler=None, logger=None, checker=None, runtime_error_handler=None):\n self.model = model.to(device)\n self.optim = optim\n self.train_loader = DataLoader(train_dataset, batch_size, True, collate_fn=collate_fn, pin_memory=True)\n self.device = device\n self.lr_scheduler = lr_scheduler\n self.logger = logger\n assert checker\n self.checker = checker\n self.runtime_error_handler = runtime_error_handler or RuntimeErrorHandler(ignore_num=2)\n\n def train(self, epoch_range):\n for epoch in range(*epoch_range):\n self.model.train()\n if self.lr_scheduler:\n self.lr_scheduler.step(epoch)\n lr = self.optim.param_groups[0]['lr']\n self.logger.new_epoch(epoch, len(self.train_loader), lr)\n for i, (x, y) in enumerate(self.train_loader):\n try:\n x, y = to(x, y, self.device)\n loss = sum(self.model(x, y).values())\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n if self.logger:\n self.logger.step(loss.item(), i + 1)\n self.runtime_error_handler.init()\n except RuntimeError as e:\n x, y, loss = None, None, None\n torch.cuda.empty_cache()\n try:\n self.runtime_error_handler.error(e)\n except RuntimeError as e:\n self.checker.saver.save(\"tmp_epoch%d_step%d\" % (epoch, i + 1))\n raise e\n\n if self.checker:\n self.checker.step(epoch, last=(epoch == epoch_range[1] - 1))\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache"
]
] |
dnddnjs/feudal-montezuma |
[
"c888198f173423575bbc1fec8ef7c86bb36bcb37"
] |
[
"dlstm_a2c/env.py"
] |
[
"import gym\nimport torch\nimport numpy as np\nfrom copy import deepcopy\nfrom utils import pre_process\nfrom torch.multiprocessing import Process\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass EnvWorker(Process):\n def __init__(self, env_name, render, child_conn):\n super(EnvWorker, self).__init__()\n self.env = gym.make(env_name)\n self.render = render\n self.child_conn = child_conn\n self.init_state()\n\n def init_state(self):\n state = self.env.reset()\n \n state, _, _, _ = self.env.step(1)\n state = pre_process(state)\n self.history = np.moveaxis(state, -1, 0)\n\n def run(self):\n super(EnvWorker, self).run()\n\n episode = 0\n steps = 0\n score = 0\n life = 5\n dead = False\n\n while True:\n if self.render:\n self.env.render()\n\n action = self.child_conn.recv()\n next_state, reward, done, info = self.env.step(action + 1)\n \n if life > info['ale.lives']:\n dead = True\n life = info['ale.lives']\n\n next_state = pre_process(next_state)\n self.history = np.moveaxis(next_state, -1, 0)\n\n steps += 1\n score += reward\n\n self.child_conn.send([deepcopy(self.history), reward, dead, done])\n\n if done and dead:\n # print('{} episode | score: {:2f} | steps: {}'.format(\n # episode, score, steps\n # ))\n episode += 1\n steps = 0\n score = 0\n dead = False\n life = 5\n self.init_state()\n\n if dead:\n dead = False\n self.init_state()\n\n"
] |
[
[
"torch.cuda.is_available",
"numpy.moveaxis"
]
] |
samsniderheld/SpriteGAN |
[
"6a6f9f000d155dde6432e04da154e1063c65de74"
] |
[
"Training/loss_functions.py"
] |
[
"import tensorflow as tf\n\ndef sagan_discriminator_loss(real, fake):\n\n real_loss = tf.reduce_mean(tf.nn.relu(1.0 - real))\n fake_loss = tf.reduce_mean(tf.nn.relu(1.0 + fake))\n\n loss = real_loss + fake_loss\n\n return loss\n\ndef sagan_generator_loss(fake):\n\n fake_loss = -tf.reduce_mean(fake)\n\n loss = fake_loss\n\n return loss"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.reduce_mean"
]
] |
NiftyPET/ninst |
[
"ecf0ef6016e5a59a49fdfc5bf2a7b09408acd9d8"
] |
[
"niftypet/ninst/raw/resources.py"
] |
[
"\"\"\"Resources file for NiftyPET NIPET and NIMPA etc.\"\"\"\n__author__ = (\"Pawel J. Markiewicz\", \"Casper O. da Costa-Luis\")\n__copyright__ = \"Copyright 2018-20\"\n\nfrom math import ceil, pi\n\ntry:\n from numpy import array\nexcept ImportError:\n\n def array(x):\n return x\n\n\n# > logging represented by an integer: 10, 20, 30... for DEBUG, INFO, WARNING...\n# > as it is in Python package logging, which is also used here.\nLOG = 20\n\n# Hardware (CT-based) mu-maps, which come with the mMR scanner.\n# The names may be different\nhrdwr_mu = [\n \"umap_HNMCL_10606489.v.hdr\", # (1) Head and neck lower coil\n \"umap_HNMCU_10606489.v.hdr\", # (2) Head and neck upper coil\n \"umap_SPMC_10606491.v.hdr\", # (3) Spine coil\n \"umap_PT_2291734.v.hdr\", # (4) Table\n]\n\n# Radioisotope look-up table\nriLUT = {\n \"Ge68\": {\"BF\": 0.891, \"thalf\": 270.9516 * 24 * 60 * 60},\n \"Ga68\": {\"BF\": 0.891, \"thalf\": 67.71 * 60},\n \"F18\": {\"BF\": 0.967, \"thalf\": 109.77120 * 60},\n \"C11\": {\"BF\": 0.998, \"thalf\": 20.38 * 60},\n \"O15\": {\"BF\": 0.999, \"thalf\": 122.2416},\n}\n\n# -----------------------------------------------------\n# The name and path to the NiftyPET tools (software)\nDIRTOOLS = \"NiftyPET_tools\"\nMSVC_VRSN = \"Visual Studio 12 2013 Win64\"\nCMAKE_TLS_PAR = \"\" # -DUSE_SSE=OFF'\n# PATHTOOLS = os.path.join('/chosen/path/', DIRTOOLS)\n# > path to Python wrapper of Vinci\nVINCIPATH = \"\"\n\n# > path to reference images for testing NiftyPET\nREFPATH = \"\"\n# -----------------------------------------------------\n\n# -----------------------------------------------------\n# DO NOT MODIFY BELOW--DONE AUTOMATICALLY\n# # # start GPU properties # # #\n# # # end GPU properties # # #\n\n# paths to apps and tools needed by NiftyPET\n# # # start NiftyPET tools # # #\n# # # end NiftyPET tools # # #\n# -----------------------------------------------------\n\n# enable xnat module\nENBLXNAT = False\n# enable Agg\nENBLAGG = False\n# compile DCM2NIIX, otherwise download a compiled version for the system used\nCMPL_DCM2NIIX = False\n\n# ============ SIEMENS mMR SCANNER C O N S T A N T S ===============\n\n# > LM header offset in bytes (for mMR it is in a separate DICOM format)\nLMOFF = 0\n\n# > bytes per event in list mode data\nBPE = 4\n\n# number of rings (axially) and crystals (transaxially)\nNRNG = 64\n\n# number of crystals transaxially\nNCRS = 504\n\n# > idealised crystal surface (used for scatter modelling)\nSRFCRS = 0.1695112\n\n# reduced number of crystals by the gaps (dead crystals)\nNCRSR = 448\n\n# maximum ring difference\nMRD = 60\n\n# number of linear indexes for 1/2 Michelogram (NRNG**2/2 + NRNG/2)\nNLI2R = 2080 - 6\n\n# number of angular indexes in a 2D sinogram\nA = 252\n# number of bin indexes in a 2D sino\nW = 344\nH = W / 2\n\nNSN11 = 837\nNSN1 = 4084\nNSN64 = NRNG * NRNG\n# 0: SSRB, 1: span-1, or 11: span-11 (11, default)\nSPAN = 11\n\nRNG_STRT = 0\nRNG_END = 64\n\n\n# ------------------------------------------------------\n# > scatter axial ring definition\nsct_irng = [0, 10, 19, 28, 35, 44, 53, 63]\n# > resulting number of rings used for scatter modelling\nNSRNG = len(sct_irng)\n# ------------------------------------------------------\n\n\n# no of sinos in a segment out of 11 segments\nseg = array([127, 115, 115, 93, 93, 71, 71, 49, 49, 27, 27])\n\n# minimum and maximum ring difference for each segment\nminrd = array([-5, -16, 6, -27, 17, -38, 28, -49, 39, -60, 50])\nmaxrd = array([5, -6, 16, -17, 27, -28, 38, -39, 49, -50, 60])\n# ----------\n\n\n# 
------------------------------------------------------\n# > transaxial projection parameters (should be in\n# > with the parameters as defined in def.h for C files)\n\n# > parameters for each transaxial LOR\nNTT = 10\n\n# > all voxels intersected by a given LOR\nNTV = 1807\n# ------------------------------------------------------\n\n# number of direct sinograms (i.e., for segment 0)\nSEG0 = 127\n\n# Reference image size (usually the default from Siemens)\n# and GPU dimensions for optimal execution\n# ~~~\nSO_IMZ = 127\nSO_IMY = 344\nSO_IMX = 344\nSO_VXX = 0.208626\nSO_VXZ = 0.203125\nSO_VXY = SO_VXX\n\nSZ_IMZ = 127\nSZ_IMY = 320\nSZ_IMX = 320\nSZ_VOXY = 0.208626\nSZ_VOXZ = 0.203125\n# ~~~\n# SO_IMZ = 127\n# SO_IMY = 384\n# SO_IMX = 384\n# SO_VXX = 0.1669\n# SO_VXZ = 0.203125\n# SO_VXY = SO_VXX\n\n# SZ_IMZ = 127\n# SZ_IMY = 384\n# SZ_IMX = 384\n# SZ_VOXY = 0.1669\n# SZ_VOXZ = 0.203125\n# ~~~\nSIGMA_RM = 0\n\n# > radius PSF kernel size used in CUDA convolution\nRSZ_PSF_KRNL = 8\n\n# ~~~\n# inverse size\nSZ_VOXZi = round(1 / SZ_VOXZ, 6)\n# squared radius of the transaxial field of view\nTFOV2 = 890.0\n\n# -------Scatter image size in x,y,z directions\n# target scale factors for scatter mu-map and emission image respectively\n# transmission (mu-map)\nTRGTSCT = [0.5, 0.33]\nSS_IMX = int(ceil(TRGTSCT[0] * SO_IMX) // 2 * 2)\nSS_IMY = int(ceil(TRGTSCT[0] * SO_IMY) // 2 * 2)\nSS_IMZ = int(ceil(TRGTSCT[0] * SO_IMZ) // 2 * 2 - 1)\nSS_VXY = round((SO_VXY * SO_IMX) / SS_IMX, 6)\nSS_VXZ = round((SO_VXZ * SO_IMZ) / SS_IMZ, 6)\nIS_VXZ = round(1 / SS_VXZ, 6)\n# scaling [z,y,x]\nSCTSCLMU = [float(SS_IMZ) / SO_IMZ, float(SS_IMY) / SO_IMY, float(SS_IMX) / SO_IMX]\n# emission\nSSE_IMX = int(ceil(TRGTSCT[1] * SO_IMX) // 2 * 2)\nSSE_IMY = int(ceil(TRGTSCT[1] * SO_IMY) // 2 * 2)\nSSE_IMZ = int(ceil(TRGTSCT[1] * SO_IMZ) // 2 * 2 + 1)\nSSE_VXY = round((SO_VXY * SO_IMX) / SSE_IMX, 6)\nSSE_VXZ = round((SO_VXZ * SO_IMZ) / SSE_IMZ, 6)\n# scaling [z,y,x]\nSCTSCLEM = [float(SSE_IMZ) / SO_IMZ, float(SSE_IMY) / SO_IMY, float(SSE_IMX) / SO_IMX]\n\n\n# # scaling for the emission image [z,y,x]\n# SCTSCLEM = [0.34, 0.33, 0.33]\n# # scaling for the mu-map\n# SCTSCLMU = [0.499, 0.5, 0.5]\n\n# SS_IMX = int(ceil(SCTSCLMU[2]*SO_IMX)//2*2)#172\n# SS_IMY = int(ceil(SCTSCLMU[1]*SO_IMY)//2*2)#172\n# SS_IMZ = int(ceil(SCTSCLMU[0]*SO_IMZ)//2*2-1)#63\n# SS_VXY = round((SO_VXY*SO_IMX)/SS_IMX,6) # 0.417252 #\n# SS_VXZ = round((SO_VXZ*SO_IMZ)/SS_IMZ,6) # 0.409474 #\n# IS_VXZ = round(1/SS_VXZ,6)\n\n# SSE_IMX = int(ceil(SCTSCLEM[2]*SO_IMX)//2*2) #114\n# SSE_IMY = int(ceil(SCTSCLEM[1]*SO_IMY)//2*2) #114\n# SSE_IMZ = int(ceil(SCTSCLEM[0]*SO_IMZ)//2*2-1) #43\n\n# SSE_VXY = round((SO_VXY*SO_IMX)/SSE_IMX,6) #0.629538\n# SSE_VXZ = round((SO_VXZ*SO_IMZ)/SSE_IMZ,6) #0.599927\n# -------\n\n# > decay correction\nDCYCRR = True\n\n# --- Time of Flight ---\n# speed of light\nCLGHT = 29979245800 # cm/s\n# coincidence time window [ps]\nCWND = 5859.38e-12\n\n# number of TOF bins\nTOFBINN = 1\n# size of TOF bin in [ps]\nTOFBINS = 390e-12\n# size of TOF BIN in cm of travelled distance\nTOFBIND = TOFBINS * CLGHT\n# inverse of the above\nITOFBIND = 1 / TOFBIND\n\n\n# ring radius\nR = 32.8\n# effective ring radius accounting for the depth of interaction\nRE = R + 0.67 # 0.67\n\nRE_2 = float(\"{0:.6f}\".format(RE ** 2))\n\n# > inverse of the radius\nIRE = float(\"{0:.6f}\".format(RE ** -1))\n\n# axial crystal width\nAXR = 0.40625\n\n# crystal angle\nALPHA = 0.714286 * pi / 180 # 2*pi/NCRS\n\n# crystal gap period\nTGAP = 9\n\n# crystal gap offset (used for getting 
the sino gaps right at the position)\nOFFGAP = 1\n\n# --- FOR SCATTER ---\n# electron radius **2\nR02 = 7.940787449825884e-26\n# detection lower energy threashold\nLLD = 430000\nE511 = 511008\n# energy resolution\nER = 0 # 0.154\n# discretisation of the scatter angle spectrum\nNCOS = 256\n# cosine of maximum allowed scatter angle\nCOSUPSMX = 0.725 # 0.58 #0.722 #Elow = E511/(2-cos(upsmx))\n# step of the discreatisation\nCOSSTP = (1 - COSUPSMX) / (NCOS - 1)\n# inverse of the step\nICOSSTP = 1 / COSSTP\n\n# intensity percentage threshold of voxels to be considered in the image\nETHRLD = 0.05\n\n\n# =================================================================================================\ndef get_gpu_constants(Cnt=None):\n \"\"\"Return a dictionary of GPU related constants\"\"\"\n if Cnt is None:\n Cnt = {}\n\n for k in [\n \"DEV_ID\", # device id; used for choosing the GPU device for calculations\n \"CC_ARCH\", # chosen device architectures for NVCC compilation\n ]:\n val = globals().get(k)\n if val is not None:\n Cnt[k.replace(\"_\", \"\")] = val\n\n return Cnt\n\n\n# =================================================================================================\ndef get_setup(Cnt=None):\n \"\"\"Return a dictionary of GPU, mu-map hardware and third party set-up.\"\"\"\n if Cnt is None:\n Cnt = {}\n\n # the name of the folder for NiftyPET tools\n Cnt[\"DIRTOOLS\"] = DIRTOOLS\n\n # additional paramteres for compiling tools with cmake\n Cnt[\"CMAKE_TLS_PAR\"] = CMAKE_TLS_PAR\n\n # hardware mu-maps\n Cnt[\"HMULIST\"] = hrdwr_mu\n\n # Microsoft Visual Studio Compiler version\n Cnt[\"MSVC_VRSN\"] = MSVC_VRSN\n\n # GPU related setup\n Cnt = get_gpu_constants(Cnt)\n\n for k in [\n \"PATHTOOLS\",\n \"RESPATH\", # image processing setup\n \"REGPATH\",\n \"DCM2NIIX\",\n \"HMUDIR\", # hardware mu-maps\n \"VINCIPATH\",\n \"REFPATH\", # > testing\n ]:\n val = globals().get(k)\n if val is not None:\n Cnt[k] = val\n\n Cnt[\"ENBLXNAT\"] = ENBLXNAT\n Cnt[\"ENBLAGG\"] = ENBLAGG\n Cnt[\"CMPL_DCM2NIIX\"] = CMPL_DCM2NIIX\n\n return Cnt\n\n\n# =================================================================================================\ndef get_mmr_constants():\n \"\"\"\n Put all the constants together in a dictionary\n \"\"\"\n\n Cnt = {\n \"LOG\": LOG,\n \"VERBOSE\": False,\n \"BPE\": BPE,\n \"LMOFF\": LMOFF,\n \"ISOTOPE\": \"F18\",\n \"DCYCRR\": DCYCRR,\n \"ALPHA\": ALPHA,\n \"NRNG\": NRNG,\n \"NCRS\": NCRS,\n \"NCRSR\": NCRSR,\n \"SRFCRS\": SRFCRS,\n \"NBCKT\": 224,\n \"NSANGLES\": A,\n \"NSBINS\": W,\n \"Naw\": -1, # number of total active bins per 2D sino\n \"NSN11\": NSN11, # number of sinos in span-11\n \"NSN1\": NSN1, # number of sinos in span-1\n \"NSN64\": NSN64, # number of sinos in span-1 with no MRD limit\n \"MRD\": MRD, # maximum ring difference RD\n \"SPN\": SPAN, # span-1 (1), span-11 (11), ssrb (0)\n \"TFOV2\": TFOV2, # squared radius of TFOV\n \"RNG_STRT\": RNG_STRT, # limit axial extension by defining start and end ring\n \"RNG_END\": RNG_END, # only works with span-1 (Cnt['SPN']==1)\n \"SS_IMZ\": SS_IMZ, # Scatter mu-map image size\n \"SS_IMY\": SS_IMY,\n \"SS_IMX\": SS_IMX,\n \"SS_VXZ\": SS_VXZ,\n \"SS_VXY\": SS_VXY,\n \"IS_VXZ\": IS_VXZ,\n \"SSE_IMZ\": SSE_IMZ, # Scatter emission image size\n \"SSE_IMY\": SSE_IMY,\n \"SSE_IMX\": SSE_IMX,\n \"SSE_VXZ\": SSE_VXZ,\n \"SSE_VXY\": SSE_VXY,\n \"SZ_IMZ\": SZ_IMZ, # GPU optimised image size\n \"SZ_IMY\": SZ_IMY,\n \"SZ_IMX\": SZ_IMX,\n \"SZ_VOXZ\": SZ_VOXZ,\n \"SZ_VOXY\": SZ_VOXY,\n \"SZ_VOXZi\": SZ_VOXZi,\n \"SO_IMZ\": SO_IMZ, # 
Original image size (from Siemens)\n \"SO_IMY\": SO_IMY,\n \"SO_IMX\": SO_IMX,\n \"SO_VXZ\": SO_VXZ,\n \"SO_VXY\": SO_VXY,\n \"SO_VXX\": SO_VXX,\n \"SIGMA_RM\": SIGMA_RM, # resolution modelling sigma\n \"RSZ_PSF_KRNL\": RSZ_PSF_KRNL, # radius PSF kernel size used in CUDA convolution\n \"NTT\": NTT,\n \"NTV\": NTV,\n \"NSEG0\": SEG0,\n \"R_RING\": RE, # effective ring radius\n \"R_2\": RE_2,\n \"IR_RING\": IRE,\n \"R\": R,\n \"SEG\": seg,\n \"MNRD\": minrd,\n \"MXRD\": maxrd,\n # > scatter moved to scatter LUTs script in sct folder\n # 'NSRNG':NSRNG, # number of scatter rings for modelling\n # 'SCTRNG':sct_irng, # scatter ring indexes\n \"TGAP\": TGAP,\n \"OFFGAP\": OFFGAP,\n \"AXR\": AXR,\n \"R02\": R02, # squared electron radius\n \"LLD\": LLD, # lower energy threashold\n \"E511\": E511,\n \"ER\": ER, # energy resolution\n # > scatter:\n \"SIRNG\": sct_irng, # scatter ring indices\n \"NSRNG\": NSRNG, # number of rings for scatter modelling\n \"COSUPSMX\": COSUPSMX, # cosine of max allowed scatter angle\n \"NCOS\": NCOS, # number of cos samples for LUT\n \"COSSTP\": COSSTP, # cosine step\n \"ICOSSTP\": ICOSSTP, # inverse of cosine step\n \"ETHRLD\": ETHRLD, # intensity emission image threshold (for scatter modelling)\n \"CLGHT\": CLGHT, # speed of light [cm/s]\n \"CWND\": CWND, # coincidence time window [ps]\n \"TOFBINN\": TOFBINN, # number of TOF bins\n \"TOFBINS\": TOFBINS, # TOF bin width [ps]\n \"TOFBIND\": TOFBIND,\n \"ITOFBIND\": ITOFBIND,\n # affine and image size for the reconstructed image,\n # assuming the centre of voxels in mm\n \"AFFINE\": array(\n [\n [-10 * SO_VXX, 0.0, 0.0, 5.0 * SO_IMX * SO_VXX], # +5.*SO_VXX\n [0.0, 10 * SO_VXY, 0.0, -5.0 * SO_IMY * SO_VXY], # +5.*SO_VXY\n [0.0, 0.0, 10 * SO_VXZ, -5.0 * SO_IMZ * SO_VXZ], # -5.*SO_VXZ\n [0.0, 0.0, 0.0, 1.0],\n ]\n ),\n \"IMSIZE\": array([SO_IMZ, SO_IMY, SO_IMX]),\n \"BTP\": 0, # 1:non parametric bootstrap, 2: parametric bootstrap (recommended)\n \"BTPRT\": 1.0, # Ratio of bootstrapped/original events (enables downsampling)\n \"SCTSCLEM\": SCTSCLEM,\n \"SCTSCLMU\": SCTSCLMU,\n }\n\n # get the setup for GPU and third party apps\n Cnt = get_setup(Cnt=Cnt)\n\n return Cnt\n"
] |
[
[
"numpy.array"
]
] |
k-cybulski/sigman-project |
[
"1f51e04dddb375eb58182664296b7b3f1db71756",
"1f51e04dddb375eb58182664296b7b3f1db71756"
] |
[
"procedures/points_aritmetic.py",
"sigman/file_manager.py"
] |
[
"import numpy as np\n\nfrom sigman.analyzer import InvalidArgumentError\n\nprocedure_type = 'points'\ndescription = (\n\"\"\"Procedure perform basic math operation on given pare of the points (+-*/), as example:\n y[t] = a[t]+b[t]\n Time is taken from a.\n\"\"\")\nauthor = 'mzylinski'\narguments = {\n 'Operation':(\"+ - addition;\"\n \"- - subtraction;\" \n \"* - multiplication;\"\n \"/ - division;\"\n \"sqr - roots a\")\n }\ndefault_arguments = {\n 'Operation':'+',\n }\noutput_type = 'Points'\nrequired_waves = ['']\nrequired_points = ['a', 'b']\n\n\ndef procedure(waves, points, begin_time, end_time, settings):\n a = points['a']\n b = points['b']\n \n\n r_x = []\n r_y = []\n \n if (len(a)>= len(b)):\n d = len (b)-1\n else:\n d = len (a)-1\n\n for i in range(0,d):\n if (settings['operacja']== '+'):\n y = a.data_y[i]+b.data_y[i]\n if (settings['operacja']== '-'):\n y = a.data_y[i]-b.data_y[i]\n if (settings['operacja']== '*'):\n y = a.data_y[i]*b.data_y[i]\n if (settings['operacja']== 'sqr'):\n y = np.sqrt(a.data_y[i])\n if (settings['operacja']== '/'):\n if (b.data_y[i] != 0):\n y = a.data_y[i]/b.data_y[i]\n else:\n y = 0\n r_x.append(a.data_x[i])\n r_y.append(y)\n\n\n return r_x, r_y\n\n\n\ndef interpret_arguments(waves, points, arguments):\n output_arguments = {}\n for key, item in arguments.items():\n if (item != '+' and item != '-' and item != '*'and item != 'sqr' and item != '/'):\n raise InvalidArgumentError(\"{} is invalid.\".format(arguments[key]))\n else:\n output_arguments[key] = item\n return output_arguments\n\ndef execute(waves, points, begin_time, end_time, arguments):\n arguments = interpret_arguments(waves, points, arguments)\n return procedure(waves, points, begin_time, end_time, arguments)",
"\"\"\"\nThis file contains functions allowing the import and export of data.\n\"\"\"\n\nimport csv\nimport os.path\nimport pickle\n\nimport numpy as np\nfrom QtSigman import DefaultColors\nimport sigman as sm \n\ndef save_composite_data(file_name, composite_data):\n \"\"\"Saves the given `Composite_data` in a pickle file.\"\"\"\n with open(file_name, 'wb') as pickle_file:\n pickle.dump(composite_data, pickle_file)\n\ndef load_composite_data(file_name):\n \"\"\"Loads `Composite_data` from a given pickle file.\"\"\"\n with open(file_name, 'rb') as pickle_file:\n return pickle.load(pickle_file)\n\ndef _import_dat(file_name):\n \"\"\"Imports two tables of coordinates from a .dat file.\"\"\"\n x = []\n y = []\n with open(file_name) as csv_file:\n reader = csv.reader(csv_file, delimiter=' ')\n for row in reader: \n x.append(float(row[0]))\n # Some .dat files have two spaces instead of one, in which case\n # row[1] is empty\n if row[1]==\"\": \n y.append(float(row[2]))\n else:\n y.append(float(row[1]))\n return x, y\n\n\ndef _import_wave_dat(file_name, wave_type, offset=0):\n \"\"\"Imports a waveform of constant frequency from a .dat file and\n returns a corresponding `Wave`.\"\"\"\n x, y = _import_dat(file_name)\n sample_rate = len(x)/(x[-1]-x[0])\n return sm.Wave(y, sample_rate, \n wave_type=wave_type, \n offset=offset)\n \ndef _import_point_dat(file_name, point_type):\n \"\"\"Imports coordinates from a .dat file and returns a corresponding\n `Points` instance.\"\"\"\n x, y = _import_dat(file_name) \n return sm.Points(x, y, \n point_type = point_type)\n\ndef import_wave(file_name, wave_type, offset=0):\n \"\"\"Imports a `Wave` instance from a given file.\"\"\"\n extension = os.path.splitext(file_name)[1][1:]\n if extension == 'dat':\n import_func = _import_wave_dat\n else:\n raise ValueError(\"Invalid file format\")\n return import_func(\n file_name, \n wave_type=wave_type, \n offset=offset)\n\ndef import_points(file_name, point_type):\n \"\"\"Imports a `Points` instance from a given file.\"\"\"\n extension = os.path.splitext(file_name)[1][1:]\n if extension == 'dat':\n import_func = _import_point_dat\n else:\n raise ValueError(\"Invalid file format\")\n return import_func(\n file_name,\n point_type = point_type)\n\ndef _export_dat(file_name, data_x, data_y):\n \"\"\"Writes two coordinate tables in a .dat file\"\"\"\n with open(file_name, 'w', newline='') as csv_file:#fix extra line in file create on windows\n writer = csv.writer(csv_file, delimiter=' ')\n for x, y in zip(data_x, data_y):\n writer.writerow([x,y])\n\ndef _export_wave_dat(file_name, wave):\n \"\"\"Exports `Wave` to a .dat file.\"\"\"\n data_x, data_y = wave.generate_coordinate_tables()\n _export_dat(file_name, data_x, data_y)\n\ndef _export_point_dat(file_name, points):\n \"\"\"Exports `Points` to a .dat file.\"\"\"\n _export_dat(file_name, points.data_x, points.data_y)\n\ndef export_wave(file_name, wave):\n \"\"\"Exports `Wave` into a file with the format depending on\n the extension.\n \"\"\"\n extension = os.path.splitext(file_name)[1][1:]\n if extension == 'dat':\n export_func = _export_wave_dat\n else:\n raise ValueError(\"Invalid file format\")\n export_func(file_name, wave)\n\ndef export_points(file_name, points):\n \"\"\"Exports `Points` into a file with the format depending on\n the extension.\n \"\"\"\n extension = os.path.splitext(file_name)[1][1:]\n if extension == 'dat':\n export_func = _export_point_dat\n else:\n raise ValueError(\"Invalid file format\")\n export_func(file_name, points)\n\ndef export(file_name, 
object):\n \"\"\"Exports `Points` or `Wave` into a file with the format depending\n on the extension.\n \"\"\"\n if isinstance(object, sm.Points):\n export_points(file_name, object)\n elif isinstance(object, sm.Wave):\n export_wave(file_name, object)\n\ndef _estimate_points_offset(reference_points, align_points,\n cross_correlation=False):\n \"\"\"Estimates the offset between two sets of points that describe\n the same data. Returns the time in seconds that the align_points\n need to be moved by.\n \n align_points must be longer than reference_points\n \"\"\"\n align_data = align_points.data_y\n reference_data = reference_points.data_y\n if not cross_correlation:\n differences = []\n difference = 0\n if (len(reference_data)>len(align_data)):\n for i in range(0,len(reference_data)-len(align_data)):\n difference = 0\n for j in range (0,len(align_data)):\n difference = difference + abs((reference_data[i+j]-align_data[j])) \n differences.append(difference)\n\n offset = (np.argmin(differences))\n else:\n for i in range(0,len(align_data)-len(reference_data)):\n difference = 0\n for j in range (0,len(align_data)):\n if (i+j>= len(reference_data)):\n break\n difference = difference + abs((reference_data[i+j]-align_data[j])) \n differences.append(difference)\n\n offset = (np.argmin(differences))\n else:\n if len(align_data) < len(reference_data):\n raise ValueError(\"Points to align must have more data than refrence.\")\n index_offset = np.argmin(np.correlate(align_points.data_y,\n reference_points.data_y))\n offset = align_points.data_x[index_offset] - reference_points.data_x[0]\n # We need to account for the fact align_points and reference_points\n # are off by 2x the time of the first value in align_points\n offset -= align_points.data_x[0]*2\n time_offset = align_points.data_x[0] - reference_points.data_x[offset]\n return time_offset\n\ndef _hr_from_r(time):\n HR = [0] * (len(time) - 1)\n for i in range(len(time) - 1):\n HR[i] = round(60 / (time[i+1] - time[i]))\n return HR\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef import_modelflow_data(file_name, reference_points, reference_points_type):\n \"\"\"Imports and aligns Finapres Modeflow data to already existing \n points.\n \n Args:\n base_data - sm.Points to align to\n base_data_type - can be 'sbp', 'dbp' or 'r'\n \"\"\"\n if reference_points_type not in ['sbp', 'dbp', 'r']:\n raise ValueError(\"Invlaid reference data type\")\n x = []\n y = None\n names = None\n # Data retrieval\n with open(file_name) as f:\n if '.A00' in file_name:\n data_section = False\n for line in f:\n if not data_section and \"END preamble\" in line:\n data_section = True \n continue\n if data_section:\n data = line.split()\n if len(data) > 2:\n if names is None:\n # trim \" characters\n names = [name[1:-1] for name in data[1:]]\n continue\n try:\n x.append(float(data[0]))\n if y is None:\n y = [[float(str_)] for str_ in data[1:]]\n else:\n for i in range(1, len(data)):\n y[i-1].append(float(data[i]))\n except ValueError: # If values are strings\n continue\n else:\n i = 0\n for line in f:\n i = i + 1\n if i == 8:\n pom = line.split(';')\n if '\\n' in pom:\n del pom[pom.index('\\n')]\n pom[0] = 'HR'\n names = pom\n if i > 8:\n pom = line.split(';') \n if '\\n' in pom:\n del pom[pom.index('\\n')]\n if len(pom) > 2:\n if is_number(pom[0]):\n x.append(float(pom[0])) \n if y is None:\n y = [[0 for k in range(1)] for j in range(len(pom))]\n for k in range(1, len(pom)):\n if (is_number(pom[k])):\n y[k][0]=(float(pom[k])) \n 
else:\n y[k][0] = 0\n else:\n for k in range(1, len(pom)):\n if (is_number(pom[k])):\n y[k].append(float(pom[k])) \n else:\n y[k].append (0)\n y[0] = _hr_from_r(x) \n # Alignment and object initialization\n # modelflow_data[0] -> fiSYS -> SBP\n # modelflow_data[1] -> fiDIA -> DBP\n # modelflow_data[6] -> HR -> can be calculated from R\n points_list = []\n hr_points = None\n offset = 0\n for y_vals, name in zip(y, names):\n points = sm.Points(x, y_vals, name)\n points_list.append(points)\n if offset == 0:\n if reference_points_type == 'sbp' and name =='fiSYS':\n offset = _estimate_points_offset(points, reference_points)\n elif reference_points_type == 'dbp' and name == 'fiDIA':\n offset = _estimate_points_offset(points, reference_points)\n elif reference_points_type == 'r' and name == 'HR':\n hr_from_r = _hr_from_r(reference_points.data_x)\n hr_points = sm.Points(reference_points.data_x, hr_from_r,\n 'HRfromR')\n offset = _estimate_points_offset(points, hr_points)\n \n\n for points in points_list:\n points.move_in_time(offset)\n \n if hr_points is not None:\n points_list.append(hr_points)\n names.append('HRfromR')\n\n\n return points_list, names\n\n\ndef import_signal_from_signal_express_file (file_name):\n \"\"\"Import wave from signal express export Ascci file. First parse the head of the file next read all wave data.\n \"\"\"\n x = []\n y = []\n names = []\n dt = 0\n with open(file_name,encoding=\"CP1250\") as f:\n i = 1\n for line in f:\n if (i == 1):\n if 'channel names:' not in line:\n break;\n if (i == 2):\n pom = line.split ('\t')\n for name in pom:\n names.append (name[(name.rfind('-')+2):].replace('\\n',''))\n if (i == 6):\n dt = float(line.replace (',','.'))\n if (i > 7):\n signals_value = line.split ('\t')\n nr = 0\n if len(y) == 0:\n y = [[0 for k in range(1)] for j in range(len(pom))]\n for value in signals_value:\n if (i == 8):\n y[nr][0]=(float(value.replace (',','.')))\n else:\n y[nr].append(float(value.replace (',','.')))\n nr = nr + 1\n i= i + 1\n setOfWaves = []\n for i in range(len(names)):\n wave = sm.Wave(y[i], dt, names[i], 0) #TODO: move to fm\n wave.offset = 0\n wave.type = names[i]\n #TODO: Check whether the name is already taken\n setOfWaves.append((wave, names[i],\n DefaultColors.getColor(names[i]), -1))\n return setOfWaves\n"
] |
[
[
"numpy.sqrt"
],
[
"numpy.correlate",
"numpy.argmin"
]
] |
mikael-epigram/keras |
[
"f9c7402548914120520a7939011c1b376a906e23"
] |
[
"keras/layers/recurrent.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n# pylint: disable=g-classes-have-attributes\n\"\"\"Recurrent layers and their base classes.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport collections\nimport functools\nimport warnings\n\nimport numpy as np\nfrom keras import activations\nfrom keras import backend\nfrom keras import constraints\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.saving.saved_model import layer_serialization\nfrom keras.utils import control_flow_util\nfrom keras.utils import generic_utils\nfrom keras.utils import tf_utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.tools.docs import doc_controls\n\n\nRECURRENT_DROPOUT_WARNING_MSG = (\n 'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '\n 'Using `implementation=1`.')\n\n\n@keras_export('keras.layers.StackedRNNCells')\nclass StackedRNNCells(Layer):\n \"\"\"Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n Args:\n cells: List of RNN cell instances.\n\n Examples:\n\n ```python\n batch_size = 3\n sentence_max_length = 5\n n_features = 2\n new_shape = (batch_size, sentence_max_length, n_features)\n x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)\n\n rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]\n stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)\n lstm_layer = tf.keras.layers.RNN(stacked_lstm)\n\n result = lstm_layer(x)\n ```\n \"\"\"\n\n def __init__(self, cells, **kwargs):\n for cell in cells:\n if 'call' not in dir(cell):\n raise ValueError('All cells must have a `call` method. '\n f'Received cell without a `call` method: {cell}')\n if 'state_size' not in dir(cell):\n raise ValueError('All cells must have a `state_size` attribute. '\n f'Received cell without a `state_size`: {cell}')\n self.cells = cells\n # reverse_state_order determines whether the state size will be in a reverse\n # order of the cells' state. User might want to set this to True to keep the\n # existing behavior. This is only useful when use RNN(return_state=True)\n # since the state will be returned as the same order of state_size.\n self.reverse_state_order = kwargs.pop('reverse_state_order', False)\n if self.reverse_state_order:\n logging.warning('reverse_state_order=True in StackedRNNCells will soon '\n 'be deprecated. 
Please update the code to work with the '\n 'natural order of states if you rely on the RNN states, '\n 'eg RNN(return_state=True).')\n super(StackedRNNCells, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n return tuple(c.state_size for c in\n (self.cells[::-1] if self.reverse_state_order else self.cells))\n\n @property\n def output_size(self):\n if getattr(self.cells[-1], 'output_size', None) is not None:\n return self.cells[-1].output_size\n elif _is_multiple_state(self.cells[-1].state_size):\n return self.cells[-1].state_size[0]\n else:\n return self.cells[-1].state_size\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n initial_states = []\n for cell in self.cells[::-1] if self.reverse_state_order else self.cells:\n get_initial_state_fn = getattr(cell, 'get_initial_state', None)\n if get_initial_state_fn:\n initial_states.append(get_initial_state_fn(\n inputs=inputs, batch_size=batch_size, dtype=dtype))\n else:\n initial_states.append(_generate_zero_filled_state_for_cell(\n cell, inputs, batch_size, dtype))\n\n return tuple(initial_states)\n\n def call(self, inputs, states, constants=None, training=None, **kwargs):\n # Recover per-cell states.\n state_size = (self.state_size[::-1]\n if self.reverse_state_order else self.state_size)\n nested_states = tf.nest.pack_sequence_as(state_size, tf.nest.flatten(states))\n\n # Call the cells in order and store the returned states.\n new_nested_states = []\n for cell, states in zip(self.cells, nested_states):\n states = states if tf.nest.is_nested(states) else [states]\n # TF cell does not wrap the state into list when there is only one state.\n is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None\n states = states[0] if len(states) == 1 and is_tf_rnn_cell else states\n if generic_utils.has_arg(cell.call, 'training'):\n kwargs['training'] = training\n else:\n kwargs.pop('training', None)\n # Use the __call__ function for callable objects, eg layers, so that it\n # will have the proper name scopes for the ops, etc.\n cell_call_fn = cell.__call__ if callable(cell) else cell.call\n if generic_utils.has_arg(cell.call, 'constants'):\n inputs, states = cell_call_fn(inputs, states,\n constants=constants, **kwargs)\n else:\n inputs, states = cell_call_fn(inputs, states, **kwargs)\n new_nested_states.append(states)\n\n return inputs, tf.nest.pack_sequence_as(state_size,\n tf.nest.flatten(new_nested_states))\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n def get_batch_input_shape(batch_size, dim):\n shape = tf.TensorShape(dim).as_list()\n return tuple([batch_size] + shape)\n\n for cell in self.cells:\n if isinstance(cell, Layer) and not cell.built:\n with backend.name_scope(cell.name):\n cell.build(input_shape)\n cell.built = True\n if getattr(cell, 'output_size', None) is not None:\n output_dim = cell.output_size\n elif _is_multiple_state(cell.state_size):\n output_dim = cell.state_size[0]\n else:\n output_dim = cell.state_size\n batch_size = tf.nest.flatten(input_shape)[0]\n if tf.nest.is_nested(output_dim):\n input_shape = tf.nest.map_structure(\n functools.partial(get_batch_input_shape, batch_size), output_dim)\n input_shape = tuple(input_shape)\n else:\n input_shape = tuple([batch_size] + tf.TensorShape(output_dim).as_list())\n self.built = True\n\n def get_config(self):\n cells = []\n for cell in self.cells:\n cells.append(generic_utils.serialize_keras_object(cell))\n config = {'cells': cells}\n 
base_config = super(StackedRNNCells, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n cells = []\n for cell_config in config.pop('cells'):\n cells.append(\n deserialize_layer(cell_config, custom_objects=custom_objects))\n return cls(cells, **config)\n\n\n@keras_export('keras.layers.RNN')\nclass RNN(Layer):\n \"\"\"Base class for recurrent layers.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Args:\n cell: A RNN cell instance or a list of RNN cell instances.\n A RNN cell is a class that has:\n - A `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - A `state_size` attribute. This can be a single integer\n (single state) in which case it is the size of the recurrent\n state. This can also be a list/tuple of integers (one size per state).\n The `state_size` can also be TensorShape or tuple/list of\n TensorShape, to represent high dimension state.\n - A `output_size` attribute. This can be a single integer or a\n TensorShape, which represent the shape of the output. For backward\n compatible reason, if this attribute is not available for the\n cell, the value will be inferred by the first element of the\n `state_size`.\n - A `get_initial_state(inputs=None, batch_size=None, dtype=None)`\n method that creates a tensor meant to be fed to `call()` as the\n initial state, if the user didn't specify any initial state via other\n means. The returned initial state should have a shape of\n [batch_size, cell.state_size]. The cell might choose to create a\n tensor full of zeros, or full of other values based on the cell's\n implementation.\n `inputs` is the input tensor to the RNN layer, which should\n contain the batch size as its shape[0], and also dtype. Note that\n the shape[0] might be `None` during the graph construction. Either\n the `inputs` or the pair of `batch_size` and `dtype` are provided.\n `batch_size` is a scalar tensor that represents the batch size\n of the inputs. `dtype` is `tf.DType` that represents the dtype of\n the inputs.\n For backward compatibility, if this method is not implemented\n by the cell, the RNN layer will create a zero filled tensor with the\n size of [batch_size, cell.state_size].\n In the case that `cell` is a list of RNN cell instances, the cells\n will be stacked on top of each other in the RNN, resulting in an\n efficient stacked RNN.\n return_sequences: Boolean (default `False`). Whether to return the last\n output in the output sequence, or the full sequence.\n return_state: Boolean (default `False`). Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default `False`).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default `False`). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default `False`).\n If True, the network will be unrolled, else a symbolic loop will be used.\n Unrolling can speed-up a RNN, although it tends to be more\n memory-intensive. 
Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n zero_output_for_mask: Boolean (default `False`).\n Whether the output should use zeros for the masked timesteps. Note that\n this field is only used when `return_sequences` is True and mask is\n provided. It can useful if you want to reuse the raw output sequence of\n the RNN without interference from the masked timesteps, eg, merging\n bidirectional RNNs.\n\n Call arguments:\n inputs: Input tensor.\n mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False`\n entry indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is for use with cells that use dropout.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n constants: List of constant tensors to be passed to the cell at each\n timestep.\n\n Input shape:\n N-D tensor with shape `[batch_size, timesteps, ...]` or\n `[timesteps, batch_size, ...]` when time_major is True.\n\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `[batch_size, state_size]`, where `state_size` could\n be a high dimension tensor shape.\n - If `return_sequences`: N-D tensor with shape\n `[batch_size, timesteps, output_size]`, where `output_size` could\n be a high dimension tensor shape, or\n `[timesteps, batch_size, output_size]` when `time_major` is True.\n - Else, N-D tensor with shape `[batch_size, output_size]`, where\n `output_size` could be a high dimension tensor shape.\n\n Masking:\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an [tf.keras.layers.Embedding] layer with the `mask_zero` parameter\n set to `True`.\n\n Note on using statefulness in RNNs:\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - Specify `stateful=True` in the layer constructor.\n - Specify a fixed batch size for your model, by passing\n If sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n Else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. 
`(32, 10, 100)`.\n - Specify `shuffle=False` when calling `fit()`.\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n Note on specifying the initial state of RNNs:\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n Note on passing external constants to RNNs:\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. an attention mechanism.\n\n Examples:\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = backend.dot(inputs, self.kernel)\n output = h + backend.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n \"\"\"\n\n def __init__(self,\n cell,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n time_major=False,\n **kwargs):\n if isinstance(cell, (list, tuple)):\n cell = StackedRNNCells(cell)\n if 'call' not in dir(cell):\n raise ValueError('Argument `cell` should have a `call` method. '\n f'The RNN was passed: cell={cell}')\n if 'state_size' not in dir(cell):\n raise ValueError('The RNN cell should have a `state_size` attribute '\n '(tuple of integers, one integer per RNN state). 
'\n f'Received: cell={cell}')\n # If True, the output for masked timestep will be zeros, whereas in the\n # False case, output from previous timestep is returned for masked timestep.\n self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)\n\n if 'input_shape' not in kwargs and (\n 'input_dim' in kwargs or 'input_length' in kwargs):\n input_shape = (kwargs.pop('input_length', None),\n kwargs.pop('input_dim', None))\n kwargs['input_shape'] = input_shape\n\n super(RNN, self).__init__(**kwargs)\n self.cell = cell\n self.return_sequences = return_sequences\n self.return_state = return_state\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.unroll = unroll\n self.time_major = time_major\n\n self.supports_masking = True\n # The input shape is unknown yet, it could have nested tensor inputs, and\n # the input spec will be the list of specs for nested inputs, the structure\n # of the input_spec will be the same as the input.\n self.input_spec = None\n self.state_spec = None\n self._states = None\n self.constants_spec = None\n self._num_constants = 0\n\n if stateful:\n if tf.distribute.has_strategy():\n raise ValueError('Stateful RNNs (created with `stateful=True`) '\n 'are not yet supported with tf.distribute.Strategy.')\n\n @property\n def _use_input_spec_as_call_signature(self):\n if self.unroll:\n # When the RNN layer is unrolled, the time step shape cannot be unknown.\n # The input spec does not define the time step (because this layer can be\n # called with any time step value, as long as it is not None), so it\n # cannot be used as the call function signature when saving to SavedModel.\n return False\n return super(RNN, self)._use_input_spec_as_call_signature\n\n @property\n def states(self):\n if self._states is None:\n state = tf.nest.map_structure(lambda _: None, self.cell.state_size)\n return state if tf.nest.is_nested(self.cell.state_size) else [state]\n return self._states\n\n @states.setter\n # Automatic tracking catches \"self._states\" which adds an extra weight and\n # breaks HDF5 checkpoints.\n @tf.__internal__.tracking.no_automatic_dependency_tracking\n def states(self, states):\n self._states = states\n\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n # Check whether the input shape contains any nested shapes. 
It could be\n # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy\n # inputs.\n try:\n input_shape = tf.TensorShape(input_shape)\n except (ValueError, TypeError):\n # A nested tensor input\n input_shape = tf.nest.flatten(input_shape)[0]\n\n batch = input_shape[0]\n time_step = input_shape[1]\n if self.time_major:\n batch, time_step = time_step, batch\n\n if _is_multiple_state(self.cell.state_size):\n state_size = self.cell.state_size\n else:\n state_size = [self.cell.state_size]\n\n def _get_output_shape(flat_output_size):\n output_dim = tf.TensorShape(flat_output_size).as_list()\n if self.return_sequences:\n if self.time_major:\n output_shape = tf.TensorShape(\n [time_step, batch] + output_dim)\n else:\n output_shape = tf.TensorShape(\n [batch, time_step] + output_dim)\n else:\n output_shape = tf.TensorShape([batch] + output_dim)\n return output_shape\n\n if getattr(self.cell, 'output_size', None) is not None:\n # cell.output_size could be nested structure.\n output_shape = tf.nest.flatten(tf.nest.map_structure(\n _get_output_shape, self.cell.output_size))\n output_shape = output_shape[0] if len(output_shape) == 1 else output_shape\n else:\n # Note that state_size[0] could be a tensor_shape or int.\n output_shape = _get_output_shape(state_size[0])\n\n if self.return_state:\n def _get_state_shape(flat_state):\n state_shape = [batch] + tf.TensorShape(flat_state).as_list()\n return tf.TensorShape(state_shape)\n state_shape = tf.nest.map_structure(_get_state_shape, state_size)\n return generic_utils.to_list(output_shape) + tf.nest.flatten(state_shape)\n else:\n return output_shape\n\n def compute_mask(self, inputs, mask):\n # Time step masks must be the same for each input.\n # This is because the mask for an RNN is of size [batch, time_steps, 1],\n # and specifies which time steps should be skipped, and a time step\n # must be skipped for all inputs.\n # TODO(scottzhu): Should we accept multiple different masks?\n mask = tf.nest.flatten(mask)[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None for _ in self.states]\n return [output_mask] + state_mask\n else:\n return output_mask\n\n def build(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n # The input_shape here could be a nest structure.\n\n # do the tensor_shape to shapes here. The input could be single tensor, or a\n # nested structure of tensors.\n def get_input_spec(shape):\n \"\"\"Convert input shape to InputSpec.\"\"\"\n if isinstance(shape, tf.TensorShape):\n input_spec_shape = shape.as_list()\n else:\n input_spec_shape = list(shape)\n batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)\n if not self.stateful:\n input_spec_shape[batch_index] = None\n input_spec_shape[time_step_index] = None\n return InputSpec(shape=tuple(input_spec_shape))\n\n def get_step_input_shape(shape):\n if isinstance(shape, tf.TensorShape):\n shape = tuple(shape.as_list())\n # remove the timestep from the input_shape\n return shape[1:] if self.time_major else (shape[0],) + shape[2:]\n\n def get_state_spec(shape):\n state_spec_shape = tf.TensorShape(shape).as_list()\n # append batch dim\n state_spec_shape = [None] + state_spec_shape\n return InputSpec(shape=tuple(state_spec_shape))\n\n # Check whether the input shape contains any nested shapes. 
It could be\n # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy\n # inputs.\n try:\n input_shape = tf.TensorShape(input_shape)\n except (ValueError, TypeError):\n # A nested tensor input\n pass\n\n if not tf.nest.is_nested(input_shape):\n # This indicates the there is only one input.\n if self.input_spec is not None:\n self.input_spec[0] = get_input_spec(input_shape)\n else:\n self.input_spec = [get_input_spec(input_shape)]\n step_input_shape = get_step_input_shape(input_shape)\n else:\n if self.input_spec is not None:\n self.input_spec[0] = tf.nest.map_structure(get_input_spec, input_shape)\n else:\n self.input_spec = generic_utils.to_list(\n tf.nest.map_structure(get_input_spec, input_shape))\n step_input_shape = tf.nest.map_structure(get_step_input_shape, input_shape)\n\n # allow cell (if layer) to build before we set or validate state_spec.\n if isinstance(self.cell, Layer) and not self.cell.built:\n with backend.name_scope(self.cell.name):\n self.cell.build(step_input_shape)\n self.cell.built = True\n\n # set or validate state_spec\n if _is_multiple_state(self.cell.state_size):\n state_size = list(self.cell.state_size)\n else:\n state_size = [self.cell.state_size]\n\n if self.state_spec is not None:\n # initial_state was passed in call, check compatibility\n self._validate_state_spec(state_size, self.state_spec)\n else:\n if tf.nest.is_nested(state_size):\n self.state_spec = tf.nest.map_structure(get_state_spec, state_size)\n else:\n self.state_spec = [\n InputSpec(shape=[None] + tf.TensorShape(dim).as_list())\n for dim in state_size\n ]\n # ensure the generated state_spec is correct.\n self._validate_state_spec(state_size, self.state_spec)\n if self.stateful:\n self.reset_states()\n self.built = True\n\n @staticmethod\n def _validate_state_spec(cell_state_sizes, init_state_specs):\n \"\"\"Validate the state spec between the initial_state and the state_size.\n\n Args:\n cell_state_sizes: list, the `state_size` attribute from the cell.\n init_state_specs: list, the `state_spec` from the initial_state that is\n passed in `call()`.\n\n Raises:\n ValueError: When initial state spec is not compatible with the state size.\n \"\"\"\n validation_error = ValueError(\n 'An `initial_state` was passed that is not compatible with '\n '`cell.state_size`. Received `state_spec`={}; '\n 'however `cell.state_size` is '\n '{}'.format(init_state_specs, cell_state_sizes))\n flat_cell_state_sizes = tf.nest.flatten(cell_state_sizes)\n flat_state_specs = tf.nest.flatten(init_state_specs)\n\n if len(flat_cell_state_sizes) != len(flat_state_specs):\n raise validation_error\n for cell_state_spec, cell_state_size in zip(flat_state_specs,\n flat_cell_state_sizes):\n if not tf.TensorShape(\n # Ignore the first axis for init_state which is for batch\n cell_state_spec.shape[1:]).is_compatible_with(\n tf.TensorShape(cell_state_size)):\n raise validation_error\n\n @doc_controls.do_not_doc_inheritable\n def get_initial_state(self, inputs):\n get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)\n\n if tf.nest.is_nested(inputs):\n # The input are nested sequences. 
Use the first element in the seq to get\n # batch size and dtype.\n inputs = tf.nest.flatten(inputs)[0]\n\n input_shape = tf.shape(inputs)\n batch_size = input_shape[1] if self.time_major else input_shape[0]\n dtype = inputs.dtype\n if get_initial_state_fn:\n init_state = get_initial_state_fn(\n inputs=None, batch_size=batch_size, dtype=dtype)\n else:\n init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,\n dtype)\n # Keras RNN expect the states in a list, even if it's a single state tensor.\n if not tf.nest.is_nested(init_state):\n init_state = [init_state]\n # Force the state to be a list in case it is a namedtuple eg LSTMStateTuple.\n return list(init_state)\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n inputs, initial_state, constants = _standardize_args(inputs,\n initial_state,\n constants,\n self._num_constants)\n\n if initial_state is None and constants is None:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n # If any of `initial_state` or `constants` are specified and are Keras\n # tensors, then add them to the inputs and temporarily modify the\n # input_spec to include them.\n\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n additional_inputs += initial_state\n self.state_spec = tf.nest.map_structure(\n lambda s: InputSpec(shape=backend.int_shape(s)), initial_state)\n additional_specs += self.state_spec\n if constants is not None:\n additional_inputs += constants\n self.constants_spec = [\n InputSpec(shape=backend.int_shape(constant)) for constant in constants\n ]\n self._num_constants = len(constants)\n additional_specs += self.constants_spec\n # additional_inputs can be empty if initial_state or constants are provided\n # but empty (e.g. the cell is stateless).\n flat_additional_inputs = tf.nest.flatten(additional_inputs)\n is_keras_tensor = backend.is_keras_tensor(\n flat_additional_inputs[0]) if flat_additional_inputs else True\n for tensor in flat_additional_inputs:\n if backend.is_keras_tensor(tensor) != is_keras_tensor:\n raise ValueError(\n 'The initial state or constants of an RNN layer cannot be '\n 'specified via a mix of Keras tensors and non-Keras tensors '\n '(a \"Keras tensor\" is a tensor that was returned by a Keras layer '\n ' or by `Input` during Functional model construction). '\n f'Received: initial_state={initial_state}, constants={constants}')\n\n if is_keras_tensor:\n # Compute the full input spec, including state and constants\n full_input = [inputs] + additional_inputs\n if self.built:\n # Keep the input_spec since it has been populated in build() method.\n full_input_spec = self.input_spec + additional_specs\n else:\n # The original input_spec is None since there could be a nested tensor\n # input. Update the input_spec to match the inputs.\n full_input_spec = generic_utils.to_list(\n tf.nest.map_structure(lambda _: None, inputs)) + additional_specs\n # Perform the call with temporarily replaced input_spec\n self.input_spec = full_input_spec\n output = super(RNN, self).__call__(full_input, **kwargs)\n # Remove the additional_specs from input spec and keep the rest. 
It is\n # important to keep since the input spec was populated by build(), and\n # will be reused in the stateful=True.\n self.input_spec = self.input_spec[:-len(additional_specs)]\n return output\n else:\n if initial_state is not None:\n kwargs['initial_state'] = initial_state\n if constants is not None:\n kwargs['constants'] = constants\n return super(RNN, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n mask=None,\n training=None,\n initial_state=None,\n constants=None):\n # The input should be dense, padded with zeros. If a ragged input is fed\n # into the layer, it is padded and the row lengths are used for masking.\n inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)\n is_ragged_input = (row_lengths is not None)\n self._validate_args_if_ragged(is_ragged_input, mask)\n\n inputs, initial_state, constants = self._process_inputs(\n inputs, initial_state, constants)\n\n self._maybe_reset_cell_dropout_mask(self.cell)\n if isinstance(self.cell, StackedRNNCells):\n for cell in self.cell.cells:\n self._maybe_reset_cell_dropout_mask(cell)\n\n if mask is not None:\n # Time step masks must be the same for each input.\n # TODO(scottzhu): Should we accept multiple different masks?\n mask = tf.nest.flatten(mask)[0]\n\n if tf.nest.is_nested(inputs):\n # In the case of nested input, use the first element for shape check.\n input_shape = backend.int_shape(tf.nest.flatten(inputs)[0])\n else:\n input_shape = backend.int_shape(inputs)\n timesteps = input_shape[0] if self.time_major else input_shape[1]\n if self.unroll and timesteps is None:\n raise ValueError('Cannot unroll a RNN if the '\n 'time dimension is undefined. \\n'\n '- If using a Sequential model, '\n 'specify the time dimension by passing '\n 'an `input_shape` or `batch_input_shape` '\n 'argument to your first layer. If your '\n 'first layer is an Embedding, you can '\n 'also use the `input_length` argument.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a `shape` '\n 'or `batch_shape` argument to your Input layer.')\n\n kwargs = {}\n if generic_utils.has_arg(self.cell.call, 'training'):\n kwargs['training'] = training\n\n # TF RNN cells expect single tensor as state instead of list wrapped tensor.\n is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None\n # Use the __call__ function for callable objects, eg layers, so that it\n # will have the proper name scopes for the ops, etc.\n cell_call_fn = self.cell.__call__ if callable(self.cell) else self.cell.call\n if constants:\n if not generic_utils.has_arg(self.cell.call, 'constants'):\n raise ValueError(\n f'RNN cell {self.cell} does not support constants. 
'\n f'Received: constants={constants}')\n\n def step(inputs, states):\n constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type\n states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type\n\n states = states[0] if len(states) == 1 and is_tf_rnn_cell else states\n output, new_states = cell_call_fn(\n inputs, states, constants=constants, **kwargs)\n if not tf.nest.is_nested(new_states):\n new_states = [new_states]\n return output, new_states\n else:\n\n def step(inputs, states):\n states = states[0] if len(states) == 1 and is_tf_rnn_cell else states\n output, new_states = cell_call_fn(inputs, states, **kwargs)\n if not tf.nest.is_nested(new_states):\n new_states = [new_states]\n return output, new_states\n last_output, outputs, states = backend.rnn(\n step,\n inputs,\n initial_state,\n constants=constants,\n go_backwards=self.go_backwards,\n mask=mask,\n unroll=self.unroll,\n input_length=row_lengths if row_lengths is not None else timesteps,\n time_major=self.time_major,\n zero_output_for_mask=self.zero_output_for_mask)\n\n if self.stateful:\n updates = [\n tf.compat.v1.assign(self_state, tf.cast(state, self_state.dtype))\n for self_state, state in zip(\n tf.nest.flatten(self.states), tf.nest.flatten(states))\n ]\n self.add_update(updates)\n\n if self.return_sequences:\n output = backend.maybe_convert_to_ragged(\n is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)\n else:\n output = last_output\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return generic_utils.to_list(output) + states\n else:\n return output\n\n def _process_inputs(self, inputs, initial_state, constants):\n # input shape: `(samples, time (padded with zeros), input_dim)`\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if (isinstance(inputs, collections.abc.Sequence)\n and not isinstance(inputs, tuple)):\n # get initial_state from full input spec\n # as they could be copied to multiple GPU.\n if not self._num_constants:\n initial_state = inputs[1:]\n else:\n initial_state = inputs[1:-self._num_constants]\n constants = inputs[-self._num_constants:]\n if len(initial_state) == 0:\n initial_state = None\n inputs = inputs[0]\n\n if self.stateful:\n if initial_state is not None:\n # When layer is stateful and initial_state is provided, check if the\n # recorded state is same as the default value (zeros). Use the recorded\n # state if it is not same as the default.\n non_zero_count = tf.add_n([tf.math.count_nonzero(s)\n for s in tf.nest.flatten(self.states)])\n # Set strict = True to keep the original structure of the state.\n initial_state = tf.compat.v1.cond(non_zero_count > 0,\n true_fn=lambda: self.states,\n false_fn=lambda: initial_state,\n strict=True)\n else:\n initial_state = self.states\n initial_state = tf.nest.map_structure(\n lambda v: tf.cast(v, self.compute_dtype), initial_state\n )\n elif initial_state is None:\n initial_state = self.get_initial_state(inputs)\n\n if len(initial_state) != len(self.states):\n raise ValueError(f'Layer has {len(self.states)} '\n f'states but was passed {len(initial_state)} initial '\n f'states. 
Received: initial_state={initial_state}')\n return inputs, initial_state, constants\n\n def _validate_args_if_ragged(self, is_ragged_input, mask):\n if not is_ragged_input:\n return\n\n if mask is not None:\n raise ValueError(f'The mask that was passed in was {mask}, which '\n 'cannot be applied to RaggedTensor inputs. Please '\n 'make sure that there is no mask injected by upstream '\n 'layers.')\n if self.unroll:\n raise ValueError('The input received contains RaggedTensors and does '\n 'not support unrolling. Disable unrolling by passing '\n '`unroll=False` in the RNN Layer constructor.')\n\n def _maybe_reset_cell_dropout_mask(self, cell):\n if isinstance(cell, DropoutRNNCellMixin):\n cell.reset_dropout_mask()\n cell.reset_recurrent_dropout_mask()\n\n def reset_states(self, states=None):\n \"\"\"Reset the recorded states for the stateful RNN layer.\n\n Can only be used when RNN layer is constructed with `stateful` = `True`.\n Args:\n states: Numpy arrays that contains the value for the initial state, which\n will be feed to cell at the first time step. When the value is None,\n zero filled numpy array will be created based on the cell state size.\n\n Raises:\n AttributeError: When the RNN layer is not stateful.\n ValueError: When the batch size of the RNN layer is unknown.\n ValueError: When the input numpy array is not compatible with the RNN\n layer state, either size wise or dtype wise.\n \"\"\"\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n spec_shape = None\n if self.input_spec is not None:\n spec_shape = tf.nest.flatten(self.input_spec[0])[0].shape\n if spec_shape is None:\n # It is possible to have spec shape to be None, eg when construct a RNN\n # with a custom cell, or standard RNN layers (LSTM/GRU) which we only know\n # it has 3 dim input, but not its full shape spec before build().\n batch_size = None\n else:\n batch_size = spec_shape[1] if self.time_major else spec_shape[0]\n if not batch_size:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. 
Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the batch size by passing a '\n '`batch_shape` argument to your Input layer.')\n # initialize state if None\n if tf.nest.flatten(self.states)[0] is None:\n if getattr(self.cell, 'get_initial_state', None):\n flat_init_state_values = tf.nest.flatten(self.cell.get_initial_state(\n inputs=None, batch_size=batch_size,\n # Use variable_dtype instead of compute_dtype, since the state is\n # stored in a variable\n dtype=self.variable_dtype or backend.floatx()))\n else:\n flat_init_state_values = tf.nest.flatten(_generate_zero_filled_state(\n batch_size, self.cell.state_size,\n self.variable_dtype or backend.floatx()))\n flat_states_variables = tf.nest.map_structure(\n backend.variable, flat_init_state_values)\n self.states = tf.nest.pack_sequence_as(self.cell.state_size,\n flat_states_variables)\n if not tf.nest.is_nested(self.states):\n self.states = [self.states]\n elif states is None:\n for state, size in zip(tf.nest.flatten(self.states),\n tf.nest.flatten(self.cell.state_size)):\n backend.set_value(\n state,\n np.zeros([batch_size] + tf.TensorShape(size).as_list()))\n else:\n flat_states = tf.nest.flatten(self.states)\n flat_input_states = tf.nest.flatten(states)\n if len(flat_input_states) != len(flat_states):\n raise ValueError(f'Layer {self.name} expects {len(flat_states)} '\n f'states, but it received {len(flat_input_states)} '\n f'state values. States received: {states}')\n set_value_tuples = []\n for i, (value, state) in enumerate(zip(flat_input_states,\n flat_states)):\n if value.shape != state.shape:\n raise ValueError(\n f'State {i} is incompatible with layer {self.name}: '\n f'expected shape={(batch_size, state)} '\n f'but found shape={value.shape}')\n set_value_tuples.append((state, value))\n backend.batch_set_value(set_value_tuples)\n\n def get_config(self):\n config = {\n 'return_sequences': self.return_sequences,\n 'return_state': self.return_state,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful,\n 'unroll': self.unroll,\n 'time_major': self.time_major\n }\n if self._num_constants:\n config['num_constants'] = self._num_constants\n if self.zero_output_for_mask:\n config['zero_output_for_mask'] = self.zero_output_for_mask\n\n config['cell'] = generic_utils.serialize_keras_object(self.cell)\n base_config = super(RNN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)\n num_constants = config.pop('num_constants', 0)\n layer = cls(cell, **config)\n layer._num_constants = num_constants\n return layer\n\n @property\n def _trackable_saved_model_saver(self):\n return layer_serialization.RNNSavedModelSaver(self)\n\n\n@keras_export('keras.layers.AbstractRNNCell')\nclass AbstractRNNCell(Layer):\n \"\"\"Abstract object representing an RNN cell.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This is the base class for implementing RNN cells with custom behavior.\n\n Every `RNNCell` must have the properties below and implement `call` with\n the signature `(output, next_state) = call(input, 
state)`.\n\n Examples:\n\n ```python\n class MinimalRNNCell(AbstractRNNCell):\n\n def __init__(self, units, **kwargs):\n self.units = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n return self.units\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = backend.dot(inputs, self.kernel)\n output = h + backend.dot(prev_output, self.recurrent_kernel)\n return output, output\n ```\n\n This definition of cell differs from the definition used in the literature.\n In the literature, 'cell' refers to an object with a single scalar output.\n This definition refers to a horizontal array of such units.\n\n An RNN cell, in the most abstract setting, is anything that has\n a state and performs some operation that takes a matrix of inputs.\n This operation results in an output matrix with `self.output_size` columns.\n If `self.state_size` is an integer, this operation also results in a new\n state matrix with `self.state_size` columns. If `self.state_size` is a\n (possibly nested tuple of) TensorShape object(s), then it should return a\n matching structure of Tensors having shape `[batch_size].concatenate(s)`\n for each `s` in `self.batch_size`.\n \"\"\"\n\n def call(self, inputs, states):\n \"\"\"The function that contains the logic for one RNN step calculation.\n\n Args:\n inputs: the input tensor, which is a slide from the overall RNN input by\n the time dimension (usually the second dimension).\n states: the state tensor from previous step, which has the same shape\n as `(batch, state_size)`. In the case of timestep 0, it will be the\n initial state user specified, or zero filled tensor otherwise.\n\n Returns:\n A tuple of two tensors:\n 1. output tensor for the current timestep, with size `output_size`.\n 2. state tensor for next step, which has the shape of `state_size`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def state_size(self):\n \"\"\"size(s) of state(s) used by this cell.\n\n It can be represented by an Integer, a TensorShape or a tuple of Integers\n or TensorShapes.\n \"\"\"\n raise NotImplementedError\n\n @property\n def output_size(self):\n \"\"\"Integer or TensorShape: size of outputs produced by this cell.\"\"\"\n raise NotImplementedError\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)\n\n\n@doc_controls.do_not_generate_docs\nclass DropoutRNNCellMixin:\n \"\"\"Object that hold dropout related fields for RNN Cell.\n\n This class is not a standalone RNN cell. It suppose to be used with a RNN cell\n by multiple inheritance. Any cell that mix with class should have following\n fields:\n dropout: a float number within range [0, 1). The ratio that the input\n tensor need to dropout.\n recurrent_dropout: a float number within range [0, 1). 
The ratio that the\n recurrent state weights need to dropout.\n This object will create and cache created dropout masks, and reuse them for\n the incoming data, so that the same mask is used for every batch input.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._create_non_trackable_mask_cache()\n super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)\n\n @tf.__internal__.tracking.no_automatic_dependency_tracking\n def _create_non_trackable_mask_cache(self):\n \"\"\"Create the cache for dropout and recurrent dropout mask.\n\n Note that the following two masks will be used in \"graph function\" mode,\n e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`\n tensors will be generated differently than in the \"graph function\" case,\n and they will be cached.\n\n Also note that in graph mode, we still cache those masks only because the\n RNN could be created with `unroll=True`. In that case, the `cell.call()`\n function will be invoked multiple times, and we want to ensure same mask\n is used every time.\n\n Also the caches are created without tracking. Since they are not picklable\n by python when deepcopy, we don't want `layer._obj_reference_counts_dict`\n to track it by default.\n \"\"\"\n self._dropout_mask_cache = backend.ContextValueCache(\n self._create_dropout_mask)\n self._recurrent_dropout_mask_cache = backend.ContextValueCache(\n self._create_recurrent_dropout_mask)\n\n def reset_dropout_mask(self):\n \"\"\"Reset the cached dropout masks if any.\n\n This is important for the RNN layer to invoke this in it `call()` method so\n that the cached mask is cleared before calling the `cell.call()`. The mask\n should be cached across the timestep within the same batch, but shouldn't\n be cached between batches. Otherwise it will introduce unreasonable bias\n against certain index of data within the batch.\n \"\"\"\n self._dropout_mask_cache.clear()\n\n def reset_recurrent_dropout_mask(self):\n \"\"\"Reset the cached recurrent dropout masks if any.\n\n This is important for the RNN layer to invoke this in it call() method so\n that the cached mask is cleared before calling the cell.call(). The mask\n should be cached across the timestep within the same batch, but shouldn't\n be cached between batches. Otherwise it will introduce unreasonable bias\n against certain index of data within the batch.\n \"\"\"\n self._recurrent_dropout_mask_cache.clear()\n\n def _create_dropout_mask(self, inputs, training, count=1):\n return _generate_dropout_mask(\n tf.ones_like(inputs),\n self.dropout,\n training=training,\n count=count)\n\n def _create_recurrent_dropout_mask(self, inputs, training, count=1):\n return _generate_dropout_mask(\n tf.ones_like(inputs),\n self.recurrent_dropout,\n training=training,\n count=count)\n\n def get_dropout_mask_for_cell(self, inputs, training, count=1):\n \"\"\"Get the dropout mask for RNN cell's input.\n\n It will create mask based on context if there isn't any existing cached\n mask. If a new mask is generated, it will update the cache in the cell.\n\n Args:\n inputs: The input tensor whose shape will be used to generate dropout\n mask.\n training: Boolean tensor, whether its in training mode, dropout will be\n ignored in non-training mode.\n count: Int, how many dropout mask will be generated. 
It is useful for cell\n that has internal weights fused together.\n Returns:\n List of mask tensor, generated or cached mask based on context.\n \"\"\"\n if self.dropout == 0:\n return None\n init_kwargs = dict(inputs=inputs, training=training, count=count)\n return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)\n\n def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):\n \"\"\"Get the recurrent dropout mask for RNN cell.\n\n It will create mask based on context if there isn't any existing cached\n mask. If a new mask is generated, it will update the cache in the cell.\n\n Args:\n inputs: The input tensor whose shape will be used to generate dropout\n mask.\n training: Boolean tensor, whether its in training mode, dropout will be\n ignored in non-training mode.\n count: Int, how many dropout mask will be generated. It is useful for cell\n that has internal weights fused together.\n Returns:\n List of mask tensor, generated or cached mask based on context.\n \"\"\"\n if self.recurrent_dropout == 0:\n return None\n init_kwargs = dict(inputs=inputs, training=training, count=count)\n return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)\n\n def __getstate__(self):\n # Used for deepcopy. The caching can't be pickled by python, since it will\n # contain tensor and graph.\n state = super(DropoutRNNCellMixin, self).__getstate__()\n state.pop('_dropout_mask_cache', None)\n state.pop('_recurrent_dropout_mask_cache', None)\n return state\n\n def __setstate__(self, state):\n state['_dropout_mask_cache'] = backend.ContextValueCache(\n self._create_dropout_mask)\n state['_recurrent_dropout_mask_cache'] = backend.ContextValueCache(\n self._create_recurrent_dropout_mask)\n super(DropoutRNNCellMixin, self).__setstate__(state)\n\n\n@keras_export('keras.layers.SimpleRNNCell')\nclass SimpleRNNCell(DropoutRNNCellMixin, Layer):\n \"\"\"Cell class for SimpleRNN.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.SimpleRNN` processes the whole sequence.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. 
Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: A 2D tensor with shape of `[batch, units]`, which is the state from\n the previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))\n\n output = rnn(inputs) # The output has shape `[32, 4]`.\n\n rnn = tf.keras.layers.RNN(\n tf.keras.layers.SimpleRNNCell(4),\n return_sequences=True,\n return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = rnn(inputs)\n ```\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n if units < 0:\n raise ValueError(f'Received an invalid value for argument `units`, '\n f'expected a positive integer, got {units}.')\n # By default use cached variable under v2 mode, see b/143699808.\n if tf.compat.v1.executing_eagerly_outside_functions():\n self._enable_caching_device = kwargs.pop('enable_caching_device', True)\n else:\n self._enable_caching_device = kwargs.pop('enable_caching_device', False)\n super(SimpleRNNCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = self.units\n self.output_size = self.units\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n default_caching_device = _caching_device(self)\n self.kernel = self.add_weight(\n shape=(input_shape[-1], self.units),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n caching_device=default_caching_device)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n caching_device=default_caching_device)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(self.units,),\n 
name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n caching_device=default_caching_device)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n prev_output = states[0] if tf.nest.is_nested(states) else states\n dp_mask = self.get_dropout_mask_for_cell(inputs, training)\n rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(\n prev_output, training)\n\n if dp_mask is not None:\n h = backend.dot(inputs * dp_mask, self.kernel)\n else:\n h = backend.dot(inputs, self.kernel)\n if self.bias is not None:\n h = backend.bias_add(h, self.bias)\n\n if rec_dp_mask is not None:\n prev_output = prev_output * rec_dp_mask\n output = h + backend.dot(prev_output, self.recurrent_kernel)\n if self.activation is not None:\n output = self.activation(output)\n\n new_state = [output] if tf.nest.is_nested(states) else output\n return output, new_state\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout\n }\n config.update(_config_for_enable_caching_device(self))\n base_config = super(SimpleRNNCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.SimpleRNN')\nclass SimpleRNN(RNN):\n \"\"\"Fully-connected RNN where the output is to be fed back to input.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. 
Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the inputs.\n Default: 0.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the\n recurrent state. Default: 0.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state\n in addition to the output. Default: `False`\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n Call arguments:\n inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[batch, timesteps]` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False` entry\n indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n simple_rnn = tf.keras.layers.SimpleRNN(4)\n\n output = simple_rnn(inputs) # The output has shape `[32, 4]`.\n\n simple_rnn = tf.keras.layers.SimpleRNN(\n 4, return_sequences=True, return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = simple_rnn(inputs)\n ```\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if 'implementation' in kwargs:\n kwargs.pop('implementation')\n logging.warning('The `implementation` argument '\n 'in `SimpleRNN` has been deprecated. 
'\n 'Please remove it from your layer call.')\n if 'enable_caching_device' in kwargs:\n cell_kwargs = {'enable_caching_device':\n kwargs.pop('enable_caching_device')}\n else:\n cell_kwargs = {}\n cell = SimpleRNNCell(\n units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n dtype=kwargs.get('dtype'),\n trainable=kwargs.get('trainable', True),\n **cell_kwargs)\n super(SimpleRNN, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.input_spec = [InputSpec(ndim=3)]\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(SimpleRNN, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout\n }\n base_config = super(SimpleRNN, self).get_config()\n config.update(_config_for_enable_caching_device(self.cell))\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n 
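# NOTE (editor's illustrative sketch; not part of the recorded source file): the\n  # dict returned by get_config() above round-trips through the from_config()\n  # classmethod below, e.g.:\n  #   layer = SimpleRNN(4, return_sequences=True)\n  #   clone = SimpleRNN.from_config(layer.get_config())\n  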
@classmethod\n def from_config(cls, config):\n if 'implementation' in config:\n config.pop('implementation')\n return cls(**config)\n\n\n@keras_export(v1=['keras.layers.GRUCell'])\nclass GRUCell(DropoutRNNCellMixin, Layer):\n \"\"\"Cell class for the GRU layer.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\" (default),\n True = \"after\" (cuDNN compatible).\n\n Call arguments:\n inputs: A 2D tensor.\n states: List of state tensors corresponding to the previous timestep.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. 
Only relevant when `dropout` or\n `recurrent_dropout` is used.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n reset_after=False,\n **kwargs):\n if units < 0:\n raise ValueError(f'Received an invalid value for argument `units`, '\n f'expected a positive integer, got {units}.')\n # By default use cached variable under v2 mode, see b/143699808.\n if tf.compat.v1.executing_eagerly_outside_functions():\n self._enable_caching_device = kwargs.pop('enable_caching_device', True)\n else:\n self._enable_caching_device = kwargs.pop('enable_caching_device', False)\n super(GRUCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n\n implementation = kwargs.pop('implementation', 1)\n if self.recurrent_dropout != 0 and implementation != 1:\n logging.debug(RECURRENT_DROPOUT_WARNING_MSG)\n self.implementation = 1\n else:\n self.implementation = implementation\n self.reset_after = reset_after\n self.state_size = self.units\n self.output_size = self.units\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n input_dim = input_shape[-1]\n default_caching_device = _caching_device(self)\n self.kernel = self.add_weight(\n shape=(input_dim, self.units * 3),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n caching_device=default_caching_device)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 3),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n caching_device=default_caching_device)\n\n if self.use_bias:\n if not self.reset_after:\n bias_shape = (3 * self.units,)\n else:\n # separate biases for input and recurrent kernels\n # Note: the shape is intentionally different from CuDNNGRU biases\n # `(2 * 3 * self.units,)`, so that we can distinguish the classes\n # when loading and converting saved weights.\n bias_shape = (2, 3 * self.units)\n self.bias = self.add_weight(shape=bias_shape,\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n caching_device=default_caching_device)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] if tf.nest.is_nested(states) else 
states # previous memory\n\n dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)\n rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(\n h_tm1, training, count=3)\n\n if self.use_bias:\n if not self.reset_after:\n input_bias, recurrent_bias = self.bias, None\n else:\n input_bias, recurrent_bias = tf.unstack(self.bias)\n\n if self.implementation == 1:\n if 0. < self.dropout < 1.:\n inputs_z = inputs * dp_mask[0]\n inputs_r = inputs * dp_mask[1]\n inputs_h = inputs * dp_mask[2]\n else:\n inputs_z = inputs\n inputs_r = inputs\n inputs_h = inputs\n\n x_z = backend.dot(inputs_z, self.kernel[:, :self.units])\n x_r = backend.dot(inputs_r, self.kernel[:, self.units:self.units * 2])\n x_h = backend.dot(inputs_h, self.kernel[:, self.units * 2:])\n\n if self.use_bias:\n x_z = backend.bias_add(x_z, input_bias[:self.units])\n x_r = backend.bias_add(x_r, input_bias[self.units: self.units * 2])\n x_h = backend.bias_add(x_h, input_bias[self.units * 2:])\n\n if 0. < self.recurrent_dropout < 1.:\n h_tm1_z = h_tm1 * rec_dp_mask[0]\n h_tm1_r = h_tm1 * rec_dp_mask[1]\n h_tm1_h = h_tm1 * rec_dp_mask[2]\n else:\n h_tm1_z = h_tm1\n h_tm1_r = h_tm1\n h_tm1_h = h_tm1\n\n recurrent_z = backend.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])\n recurrent_r = backend.dot(\n h_tm1_r, self.recurrent_kernel[:, self.units:self.units * 2])\n if self.reset_after and self.use_bias:\n recurrent_z = backend.bias_add(recurrent_z, recurrent_bias[:self.units])\n recurrent_r = backend.bias_add(\n recurrent_r, recurrent_bias[self.units:self.units * 2])\n\n z = self.recurrent_activation(x_z + recurrent_z)\n r = self.recurrent_activation(x_r + recurrent_r)\n\n # reset gate applied after/before matrix multiplication\n if self.reset_after:\n recurrent_h = backend.dot(\n h_tm1_h, self.recurrent_kernel[:, self.units * 2:])\n if self.use_bias:\n recurrent_h = backend.bias_add(\n recurrent_h, recurrent_bias[self.units * 2:])\n recurrent_h = r * recurrent_h\n else:\n recurrent_h = backend.dot(\n r * h_tm1_h, self.recurrent_kernel[:, self.units * 2:])\n\n hh = self.activation(x_h + recurrent_h)\n else:\n if 0. 
< self.dropout < 1.:\n inputs = inputs * dp_mask[0]\n\n # inputs projected by all gate matrices at once\n matrix_x = backend.dot(inputs, self.kernel)\n if self.use_bias:\n # biases: bias_z_i, bias_r_i, bias_h_i\n matrix_x = backend.bias_add(matrix_x, input_bias)\n\n x_z, x_r, x_h = tf.split(matrix_x, 3, axis=-1)\n\n if self.reset_after:\n # hidden state projected by all gate matrices at once\n matrix_inner = backend.dot(h_tm1, self.recurrent_kernel)\n if self.use_bias:\n matrix_inner = backend.bias_add(matrix_inner, recurrent_bias)\n else:\n # hidden state projected separately for update/reset and new\n matrix_inner = backend.dot(\n h_tm1, self.recurrent_kernel[:, :2 * self.units])\n\n recurrent_z, recurrent_r, recurrent_h = tf.split(\n matrix_inner, [self.units, self.units, -1], axis=-1)\n\n z = self.recurrent_activation(x_z + recurrent_z)\n r = self.recurrent_activation(x_r + recurrent_r)\n\n if self.reset_after:\n recurrent_h = r * recurrent_h\n else:\n recurrent_h = backend.dot(\n r * h_tm1, self.recurrent_kernel[:, 2 * self.units:])\n\n hh = self.activation(x_h + recurrent_h)\n # previous and candidate state mixed by update gate\n h = z * h_tm1 + (1 - z) * hh\n new_state = [h] if tf.nest.is_nested(states) else h\n return h, new_state\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation,\n 'reset_after': self.reset_after\n }\n config.update(_config_for_enable_caching_device(self))\n base_config = super(GRUCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)\n\n\n@keras_export(v1=['keras.layers.GRU'])\nclass GRU(RNN):\n \"\"\"Gated Recurrent Unit - Cho et al. 2014.\n\n There are two variants. The default one is based on 1406.1078v3 and\n has reset gate applied to hidden state before matrix multiplication. The\n other one is based on original 1406.1078v1 and has the order reversed.\n\n The second variant is compatible with CuDNNGRU (GPU-only) and allows\n inference on CPU. Thus it has separate biases for `kernel` and\n `recurrent_kernel`. Use `'reset_after'=True` and\n `recurrent_activation='sigmoid'`.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\" (default),\n True = \"after\" (cuDNN compatible).\n\n Call arguments:\n inputs: A 3D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False`\n entry indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is only relevant if `dropout` or\n `recurrent_dropout` is used.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n reset_after=False,\n **kwargs):\n implementation = kwargs.pop('implementation', 1)\n if implementation == 0:\n logging.warning('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if 'enable_caching_device' in kwargs:\n cell_kwargs = {'enable_caching_device':\n kwargs.pop('enable_caching_device')}\n else:\n cell_kwargs = {}\n cell = GRUCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation,\n reset_after=reset_after,\n dtype=kwargs.get('dtype'),\n trainable=kwargs.get('trainable', True),\n **cell_kwargs)\n super(GRU, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.input_spec = [InputSpec(ndim=3)]\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(GRU, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n @property\n def 
reset_after(self):\n return self.cell.reset_after\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation,\n 'reset_after':\n self.reset_after\n }\n config.update(_config_for_enable_caching_device(self.cell))\n base_config = super(GRU, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\n@keras_export(v1=['keras.layers.LSTMCell'])\nclass LSTMCell(DropoutRNNCellMixin, Layer):\n \"\"\"Cell class for the LSTM layer.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n\n Call arguments:\n inputs: A 2D tensor.\n states: List of state tensors corresponding to the previous timestep.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n if units < 0:\n raise ValueError(f'Received an invalid value for argument `units`, '\n f'expected a positive integer, got {units}.')\n # By default use cached variable under v2 mode, see b/143699808.\n if tf.compat.v1.executing_eagerly_outside_functions():\n self._enable_caching_device = kwargs.pop('enable_caching_device', True)\n else:\n self._enable_caching_device = kwargs.pop('enable_caching_device', False)\n super(LSTMCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n 
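# Editor's note (illustrative comment; not in the recorded file):\n    # `implementation=1` computes the four LSTM gates as several smaller dot\n    # products over split kernels, while `implementation=2` batches them into\n    # fewer, larger matrix multiplications; when recurrent_dropout is used, the\n    # lines below fall back to implementation 1 and log\n    # RECURRENT_DROPOUT_WARNING_MSG at debug level.\n    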
implementation = kwargs.pop('implementation', 1)\n if self.recurrent_dropout != 0 and implementation != 1:\n logging.debug(RECURRENT_DROPOUT_WARNING_MSG)\n self.implementation = 1\n else:\n self.implementation = implementation\n self.state_size = [self.units, self.units]\n self.output_size = self.units\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n default_caching_device = _caching_device(self)\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(\n shape=(input_dim, self.units * 4),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n caching_device=default_caching_device)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n caching_device=default_caching_device)\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(_, *args, **kwargs):\n return backend.concatenate([\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.get('ones')((self.units,), *args, **kwargs),\n self.bias_initializer((self.units * 2,), *args, **kwargs),\n ])\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n caching_device=default_caching_device)\n else:\n self.bias = None\n self.built = True\n\n def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n \"\"\"Computes carry and output using split kernels.\"\"\"\n x_i, x_f, x_c, x_o = x\n h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n f = self.recurrent_activation(x_f + backend.dot(\n h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))\n c = f * c_tm1 + i * self.activation(x_c + backend.dot(\n h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n o = self.recurrent_activation(\n x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n return c, o\n\n def _compute_carry_and_output_fused(self, z, c_tm1):\n \"\"\"Computes carry and output using fused kernels.\"\"\"\n z0, z1, z2, z3 = z\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n return c, o\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)\n rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(\n h_tm1, training, count=4)\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n k_i, k_f, k_c, k_o = tf.split(\n self.kernel, num_or_size_splits=4, axis=1)\n x_i = backend.dot(inputs_i, k_i)\n x_f = backend.dot(inputs_f, k_f)\n x_c = backend.dot(inputs_c, k_c)\n x_o = backend.dot(inputs_o, k_o)\n if self.use_bias:\n b_i, b_f, b_c, b_o = tf.split(\n self.bias, num_or_size_splits=4, axis=0)\n x_i = backend.bias_add(x_i, b_i)\n x_f = backend.bias_add(x_f, b_f)\n x_c = backend.bias_add(x_c, b_c)\n x_o = 
backend.bias_add(x_o, b_o)\n\n if 0 < self.recurrent_dropout < 1.:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n x = (x_i, x_f, x_c, x_o)\n h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)\n c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)\n else:\n if 0. < self.dropout < 1.:\n inputs = inputs * dp_mask[0]\n z = backend.dot(inputs, self.kernel)\n z += backend.dot(h_tm1, self.recurrent_kernel)\n if self.use_bias:\n z = backend.bias_add(z, self.bias)\n\n z = tf.split(z, num_or_size_splits=4, axis=1)\n c, o = self._compute_carry_and_output_fused(z, c_tm1)\n\n h = o * self.activation(c)\n return h, [h, c]\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'unit_forget_bias':\n self.unit_forget_bias,\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation\n }\n config.update(_config_for_enable_caching_device(self))\n base_config = super(LSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return list(_generate_zero_filled_state_for_cell(\n self, inputs, batch_size, dtype))\n\n\n@keras_export('keras.experimental.PeepholeLSTMCell')\nclass PeepholeLSTMCell(LSTMCell):\n \"\"\"Equivalent to LSTMCell class but adds peephole connections.\n\n Peephole connections allow the gates to utilize the previous internal state as\n well as the previous hidden state (which is what LSTMCell is limited to).\n This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.\n\n From [Gers et al., 2002](\n http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):\n\n \"We find that LSTM augmented by 'peephole connections' from its internal\n cells to its multiplicative gates can learn the fine distinction between\n sequences of spikes spaced either 50 or 49 time steps apart without the help\n of any short training exemplars.\"\n\n The peephole implementation is based on:\n\n [Sak et al., 2014](https://research.google.com/pubs/archive/43905.pdf)\n\n Example:\n\n ```python\n # Create 2 PeepholeLSTMCells\n peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]\n # Create a layer composed sequentially of the peephole LSTM cells.\n layer = RNN(peephole_lstm_cells)\n input = keras.Input((timesteps, input_dim))\n output = layer(input)\n ```\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n 
recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n warnings.warn(\n '`tf.keras.experimental.PeepholeLSTMCell` is deprecated '\n 'and will be removed in a future version. '\n 'Please use tensorflow_addons.rnn.PeepholeLSTMCell '\n 'instead.',\n stacklevel=2)\n super(PeepholeLSTMCell, self).__init__(\n units=units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n unit_forget_bias=unit_forget_bias,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=kwargs.pop('implementation', 1),\n **kwargs)\n\n def build(self, input_shape):\n super(PeepholeLSTMCell, self).build(input_shape)\n # The following are the weight matrices for the peephole connections. These\n # are multiplied with the previous internal state during the computation of\n # carry and output.\n self.input_gate_peephole_weights = self.add_weight(\n shape=(self.units,),\n name='input_gate_peephole_weights',\n initializer=self.kernel_initializer)\n self.forget_gate_peephole_weights = self.add_weight(\n shape=(self.units,),\n name='forget_gate_peephole_weights',\n initializer=self.kernel_initializer)\n self.output_gate_peephole_weights = self.add_weight(\n shape=(self.units,),\n name='output_gate_peephole_weights',\n initializer=self.kernel_initializer)\n\n def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n x_i, x_f, x_c, x_o = x\n h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +\n self.input_gate_peephole_weights * c_tm1)\n f = self.recurrent_activation(x_f + backend.dot(\n h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +\n self.forget_gate_peephole_weights * c_tm1)\n c = f * c_tm1 + i * self.activation(x_c + backend.dot(\n h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n o = self.recurrent_activation(\n x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +\n self.output_gate_peephole_weights * c)\n return c, o\n\n def _compute_carry_and_output_fused(self, z, c_tm1):\n z0, z1, z2, z3 = z\n i = self.recurrent_activation(z0 +\n self.input_gate_peephole_weights * c_tm1)\n f = self.recurrent_activation(z1 +\n self.forget_gate_peephole_weights * c_tm1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)\n return c, o\n\n\n@keras_export(v1=['keras.layers.LSTM'])\nclass LSTM(RNN):\n \"\"\"Long Short-Term Memory layer - Hochreiter 1997.\n\n Note that this cell is not optimized for performance on GPU. Please use\n `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs..\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n\n Call arguments:\n inputs: A 3D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False`\n entry indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is only relevant if `dropout` or\n `recurrent_dropout` is used.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n implementation = kwargs.pop('implementation', 1)\n if implementation == 0:\n logging.warning('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if 'enable_caching_device' in kwargs:\n cell_kwargs = {'enable_caching_device':\n kwargs.pop('enable_caching_device')}\n else:\n cell_kwargs = {}\n cell = LSTMCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n unit_forget_bias=unit_forget_bias,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation,\n dtype=kwargs.get('dtype'),\n trainable=kwargs.get('trainable', True),\n **cell_kwargs)\n super(LSTM, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.input_spec = [InputSpec(ndim=3)]\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(LSTM, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return 
self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'unit_forget_bias':\n self.unit_forget_bias,\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation\n }\n config.update(_config_for_enable_caching_device(self.cell))\n base_config = super(LSTM, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\ndef _generate_dropout_mask(ones, rate, training=None, count=1):\n def dropped_inputs():\n return backend.dropout(ones, rate)\n\n if count > 1:\n return [\n backend.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(count)\n ]\n return backend.in_train_phase(dropped_inputs, ones, training=training)\n\n\ndef _standardize_args(inputs, initial_state, constants, num_constants):\n \"\"\"Standardizes `__call__` to a single list of tensor inputs.\n\n When running a model loaded from a file, the input tensors\n `initial_state` and `constants` can be passed to `RNN.__call__()` as part\n of `inputs` instead of by the dedicated keyword arguments. This method\n makes sure the arguments are separated and that `initial_state` and\n `constants` are lists of tensors (or None).\n\n Args:\n inputs: Tensor or list/tuple of tensors. which may include constants\n and initial states. In that case `num_constant` must be specified.\n initial_state: Tensor or list of tensors or None, initial states.\n constants: Tensor or list of tensors or None, constant tensors.\n num_constants: Expected number of constants (if constants are passed as\n part of the `inputs` list.\n\n Returns:\n inputs: Single tensor or tuple of tensors.\n initial_state: List of tensors or None.\n constants: List of tensors or None.\n \"\"\"\n if isinstance(inputs, list):\n # There are several situations here:\n # In the graph mode, __call__ will be only called once. The initial_state\n # and constants could be in inputs (from file loading).\n # In the eager mode, __call__ will be called twice, once during\n # rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be\n # model.fit/train_on_batch/predict with real np data. 
In the second case,\n # the inputs will contain initial_state and constants as eager tensor.\n #\n # For either case, the real input is the first item in the list, which\n # could be a nested structure itself. Then followed by initial_states, which\n # could be a list of items, or list of list if the initial_state is complex\n # structure, and finally followed by constants which is a flat list.\n assert initial_state is None and constants is None\n if num_constants:\n constants = inputs[-num_constants:]\n inputs = inputs[:-num_constants]\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[:1]\n\n if len(inputs) > 1:\n inputs = tuple(inputs)\n else:\n inputs = inputs[0]\n\n def to_list_or_none(x):\n if x is None or isinstance(x, list):\n return x\n if isinstance(x, tuple):\n return list(x)\n return [x]\n\n initial_state = to_list_or_none(initial_state)\n constants = to_list_or_none(constants)\n\n return inputs, initial_state, constants\n\n\ndef _is_multiple_state(state_size):\n \"\"\"Check whether the state_size contains multiple states.\"\"\"\n return (hasattr(state_size, '__len__') and\n not isinstance(state_size, tf.TensorShape))\n\n\ndef _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):\n if inputs is not None:\n batch_size = tf.shape(inputs)[0]\n dtype = inputs.dtype\n return _generate_zero_filled_state(batch_size, cell.state_size, dtype)\n\n\ndef _generate_zero_filled_state(batch_size_tensor, state_size, dtype):\n \"\"\"Generate a zero filled tensor with shape [batch_size, state_size].\"\"\"\n if batch_size_tensor is None or dtype is None:\n raise ValueError(\n 'batch_size and dtype cannot be None while constructing initial state. '\n f'Received: batch_size={batch_size_tensor}, dtype={dtype}')\n\n def create_zeros(unnested_state_size):\n flat_dims = tf.TensorShape(unnested_state_size).as_list()\n init_state_size = [batch_size_tensor] + flat_dims\n return tf.zeros(init_state_size, dtype=dtype)\n\n if tf.nest.is_nested(state_size):\n return tf.nest.map_structure(create_zeros, state_size)\n else:\n return create_zeros(state_size)\n\n\ndef _caching_device(rnn_cell):\n \"\"\"Returns the caching device for the RNN variable.\n\n This is useful for distributed training, when variable is not located as same\n device as the training worker. By enabling the device cache, this allows\n worker to read the variable once and cache locally, rather than read it every\n time step from remote when it is needed.\n\n Note that this is assuming the variable that cell needs for each time step is\n having the same value in the forward path, and only gets updated in the\n backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the\n cell body relies on any variable that gets updated every time step, then\n caching device will cause it to read the stall value.\n\n Args:\n rnn_cell: the rnn cell instance.\n \"\"\"\n if tf.executing_eagerly():\n # caching_device is not supported in eager mode.\n return None\n if not getattr(rnn_cell, '_enable_caching_device', False):\n return None\n # Don't set a caching device when running in a loop, since it is possible that\n # train steps could be wrapped in a tf.while_loop. 
In that scenario caching\n # prevents forward computations in loop iterations from re-reading the\n # updated weights.\n if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):\n logging.warning(\n 'Variable read device caching has been disabled because the '\n 'RNN is in tf.while_loop loop context, which will cause '\n 'reading stalled value in forward path. This could slow down '\n 'the training due to duplicated variable reads. Please '\n 'consider updating your code to remove tf.while_loop if possible.')\n return None\n if (rnn_cell._dtype_policy.compute_dtype !=\n rnn_cell._dtype_policy.variable_dtype):\n logging.warning(\n 'Variable read device caching has been disabled since it '\n 'doesn\\'t work with the mixed precision API. This is '\n 'likely to cause a slowdown for RNN training due to '\n 'duplicated read of variable for each timestep, which '\n 'will be significant in a multi remote worker setting. '\n 'Please consider disabling mixed precision API if '\n 'the performance has been affected.')\n return None\n # Cache the value on the device that access the variable.\n return lambda op: op.device\n\n\ndef _config_for_enable_caching_device(rnn_cell):\n \"\"\"Return the dict config for RNN cell wrt to enable_caching_device field.\n\n Since enable_caching_device is a internal implementation detail for speed up\n the RNN variable read when running on the multi remote worker setting, we\n don't want this config to be serialized constantly in the JSON. We will only\n serialize this field when a none default value is used to create the cell.\n Args:\n rnn_cell: the RNN cell for serialize.\n\n Returns:\n A dict which contains the JSON config for enable_caching_device value or\n empty dict if the enable_caching_device value is same as the default value.\n \"\"\"\n default_enable_caching_device = tf.compat.v1.executing_eagerly_outside_functions()\n if rnn_cell._enable_caching_device != default_enable_caching_device:\n return {'enable_caching_device': rnn_cell._enable_caching_device}\n return {}\n"
] |
[
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.compat.v1.executing_eagerly_outside_functions",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.zeros",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.compat.v2.unstack",
"tensorflow.compat.v2.compat.v1.cond",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.compat.v1.get_default_graph",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.math.count_nonzero",
"tensorflow.compat.v2.split",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.distribute.has_strategy"
]
] |
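The record above embeds the Keras v1 recurrent layers (`GRU`, `LSTMCell`, `PeepholeLSTMCell`, `LSTM`) and their helper functions. As a quick orientation for the exported names, here is a minimal, hedged usage sketch of an `LSTM` layer and of stacked `LSTMCell`s driven by the generic `RNN` wrapper, in the spirit of the example in the `PeepholeLSTMCell` docstring. The layer sizes, input shape, and variable names are illustrative assumptions, and `tf.keras.layers.*` may resolve to the newer (v2) implementations of these same exported names rather than the v1 classes shown in the record.

```python
# Minimal sketch (assumed shapes/sizes) of the layers defined in the record above.
import tensorflow as tf

timesteps, input_dim = 10, 8                      # illustrative assumptions
inputs = tf.keras.Input((timesteps, input_dim))

# Plain LSTM layer; units, dropout and recurrent_dropout are documented in the record.
x = tf.keras.layers.LSTM(32, return_sequences=True, dropout=0.1)(inputs)

# Stacked LSTMCells wrapped by the generic RNN layer.
cells = [tf.keras.layers.LSTMCell(size) for size in (64, 32)]
outputs = tf.keras.layers.RNN(cells)(x)

model = tf.keras.Model(inputs, outputs)
model.summary()
```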
ajgpitch/GPyOpt
|
[
"3f99f432198b54cf71bdbb350f3d623337c17696"
] |
[
"GPyOpt/core/bo.py"
] |
[
"# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport GPyOpt\nimport collections\nimport numpy as np\nimport time\nimport csv\n\nfrom ..util.general import best_value, normalize\nfrom ..util.duplicate_manager import DuplicateManager\nfrom ..core.errors import InvalidConfigError\nfrom ..core.task.cost import CostModel\nfrom ..optimization.acquisition_optimizer import ContextManager\ntry:\n from ..plotting.plots_bo import plot_acquisition, plot_convergence\nexcept:\n pass\n\n\nclass BO(object):\n \"\"\"\n Runner of Bayesian optimization loop. This class wraps the optimization loop around the different handlers.\n :param model: GPyOpt model class.\n :param space: GPyOpt space class.\n :param objective: GPyOpt objective class.\n :param acquisition: GPyOpt acquisition class.\n :param evaluator: GPyOpt evaluator class.\n :param X_init: 2d numpy array containing the initial inputs (one per row) of the model.\n :param Y_init: 2d numpy array containing the initial outputs (one per row) of the model.\n :param cost: GPyOpt cost class (default, none).\n :param normalize_Y: whether to normalize the outputs before performing any optimization (default, True).\n :param model_update_interval: interval of collected observations after which the model is updated (default, 1).\n :param de_duplication: GPyOpt DuplicateManager class. Avoids re-evaluating the objective at previous, pending or infeasible locations (default, False).\n \"\"\"\n\n\n def __init__(self, model, space, objective, acquisition, evaluator, X_init, Y_init=None, cost = None, normalize_Y = True, model_update_interval = 1, de_duplication = False):\n self.model = model\n self.space = space\n self.objective = objective\n self.acquisition = acquisition\n self.evaluator = evaluator\n self.normalize_Y = normalize_Y\n self.model_update_interval = model_update_interval\n self.X = X_init\n self.Y = Y_init\n self.cost = CostModel(cost)\n self.normalization_type = 'stats' ## not added in the API\n self.de_duplication = de_duplication\n self.model_parameters_iterations = None\n self.context = None\n self.num_acquisitions = 0\n\n def suggest_next_locations(self, context = None, pending_X = None, ignored_X = None):\n \"\"\"\n Run a single optimization step and return the next locations to evaluate the objective.\n Number of suggested locations equals to batch_size.\n\n :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).\n :param pending_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet) (default, None).\n :param ignored_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again (default, None).\n \"\"\"\n self.model_parameters_iterations = None\n self.num_acquisitions = 0\n self.context = context\n self._update_model(self.normalization_type)\n\n suggested_locations = self._compute_next_evaluations(pending_zipped_X = pending_X, ignored_zipped_X = ignored_X)\n\n return suggested_locations\n\n def run_optimization(self, max_iter = 0, max_time = np.inf, eps = 1e-8, context = None, verbosity=False, save_models_parameters= True, report_file = None, evaluations_file = None, models_file=None):\n \"\"\"\n Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)\n\n :param max_iter: exploration horizon, or number of acquisitions. 
If nothing is provided optimizes the current acquisition.\n :param max_time: maximum exploration horizon in seconds.\n :param eps: minimum distance between two consecutive x's to keep running the model.\n :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).\n :param verbosity: flag to print the optimization results after each iteration (default, False).\n :param report_file: file to which the results of the optimization are saved (default, None).\n :param evaluations_file: file to which the evalations are saved (default, None).\n :param models_file: file to which the model parameters are saved (default, None).\n \"\"\"\n\n if self.objective is None:\n raise InvalidConfigError(\"Cannot run the optimization loop without the objective function\")\n\n # --- Save the options to print and save the results\n self.verbosity = verbosity\n self.save_models_parameters = save_models_parameters\n self.report_file = report_file\n self.evaluations_file = evaluations_file\n self.models_file = models_file\n self.model_parameters_iterations = None\n self.context = context\n\n # --- Check if we can save the model parameters in each iteration\n if self.save_models_parameters == True:\n if not (isinstance(self.model, GPyOpt.models.GPModel) or isinstance(self.model, GPyOpt.models.GPModel_MCMC)):\n print('Models printout after each iteration is only available for GP and GP_MCMC models')\n self.save_models_parameters = False\n\n # --- Setting up stop conditions\n self.eps = eps\n if (max_iter is None) and (max_time is None):\n self.max_iter = 0\n self.max_time = np.inf\n elif (max_iter is None) and (max_time is not None):\n self.max_iter = np.inf\n self.max_time = max_time\n elif (max_iter is not None) and (max_time is None):\n self.max_iter = max_iter\n self.max_time = np.inf\n else:\n self.max_iter = max_iter\n self.max_time = max_time\n\n # --- Initial function evaluation and model fitting\n if self.X is not None and self.Y is None:\n self.Y, cost_values = self.objective.evaluate(self.X)\n if self.cost.cost_type == 'evaluation_time':\n self.cost.update_cost_model(self.X, cost_values)\n\n # --- Initialize iterations and running time\n self.time_zero = time.time()\n self.cum_time = 0\n self.num_acquisitions = 0\n self.suggested_sample = self.X\n self.Y_new = self.Y\n\n # --- Initialize time cost of the evaluations\n while (self.max_time > self.cum_time):\n # --- Update model\n try:\n self._update_model(self.normalization_type)\n except np.linalg.linalg.LinAlgError:\n break\n\n if (self.num_acquisitions >= self.max_iter\n or (len(self.X) > 1 and self._distance_last_evaluations() <= self.eps)):\n break\n\n self.suggested_sample = self._compute_next_evaluations()\n\n # --- Augment X\n self.X = np.vstack((self.X,self.suggested_sample))\n\n # --- Evaluate *f* in X, augment Y and update cost function (if needed)\n self.evaluate_objective()\n\n # --- Update current evaluation time and function evaluations\n self.cum_time = time.time() - self.time_zero\n self.num_acquisitions += 1\n\n if verbosity:\n print(\"num acquisition: {}, time elapsed: {:.2f}s\".format(\n self.num_acquisitions, self.cum_time))\n\n # --- Stop messages and execution time\n self._compute_results()\n\n # --- Print the desired result in files\n if self.report_file is not None:\n self.save_report(self.report_file)\n if self.evaluations_file is not None:\n self.save_evaluations(self.evaluations_file)\n if self.models_file is not None:\n self.save_models(self.models_file)\n\n def 
_print_convergence(self):\n \"\"\"\n Prints the reason why the optimization stopped.\n \"\"\"\n\n if self.verbosity:\n if (self.num_acquisitions == self.max_iter) and (not self.initial_iter):\n print(' ** Maximum number of iterations reached **')\n return 1\n elif (self._distance_last_evaluations() < self.eps) and (not self.initial_iter):\n print(' ** Two equal location selected **')\n return 1\n elif (self.max_time < self.cum_time) and not (self.initial_iter):\n print(' ** Evaluation time reached **')\n return 0\n\n if self.initial_iter:\n print('** GPyOpt Bayesian Optimization class initialized successfully **')\n self.initial_iter = False\n\n\n def evaluate_objective(self):\n \"\"\"\n Evaluates the objective\n \"\"\"\n self.Y_new, cost_new = self.objective.evaluate(self.suggested_sample)\n self.cost.update_cost_model(self.suggested_sample, cost_new)\n self.Y = np.vstack((self.Y,self.Y_new))\n\n def _compute_results(self):\n \"\"\"\n Computes the optimum and its value.\n \"\"\"\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n\n def _distance_last_evaluations(self):\n \"\"\"\n Computes the distance between the last two evaluations.\n \"\"\"\n if self.X.shape[0] < 2:\n # less than 2 evaluations\n return np.inf\n return np.sqrt(np.sum((self.X[-1, :] - self.X[-2, :]) ** 2))\n\n def _compute_next_evaluations(self, pending_zipped_X=None, ignored_zipped_X=None):\n \"\"\"\n Computes the location of the new evaluation (optimizes the acquisition in the standard case).\n :param pending_zipped_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet).\n :param ignored_zipped_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again.\n :return:\n \"\"\"\n\n ## --- Update the context if any\n self.acquisition.optimizer.context_manager = ContextManager(self.space, self.context)\n\n ### --- Activate de_duplication\n if self.de_duplication:\n duplicate_manager = DuplicateManager(space=self.space, zipped_X=self.X, pending_zipped_X=pending_zipped_X, ignored_zipped_X=ignored_zipped_X)\n else:\n duplicate_manager = None\n\n ### We zip the value in case there are categorical variables\n return self.space.zip_inputs(self.evaluator.compute_batch(duplicate_manager=duplicate_manager, context_manager= self.acquisition.optimizer.context_manager))\n\n def _update_model(self, normalization_type='stats'):\n \"\"\"\n Updates the model (when more than one observation is available) and saves the parameters (if available).\n \"\"\"\n if self.num_acquisitions % self.model_update_interval == 0:\n\n # input that goes into the model (is unziped in case there are categorical variables)\n X_inmodel = self.space.unzip_inputs(self.X)\n\n # Y_inmodel is the output that goes into the model\n if self.normalize_Y:\n Y_inmodel = normalize(self.Y, normalization_type)\n else:\n Y_inmodel = self.Y\n\n self.model.updateModel(X_inmodel, Y_inmodel, None, None)\n\n # Save parameters of the model\n self._save_model_parameter_values()\n\n def _save_model_parameter_values(self):\n if self.model_parameters_iterations is None:\n self.model_parameters_iterations = self.model.get_model_parameters()\n else:\n self.model_parameters_iterations = np.vstack((self.model_parameters_iterations,self.model.get_model_parameters()))\n\n def plot_acquisition(self,filename=None):\n \"\"\"\n Plots the model and the acquisition function.\n if self.input_dim = 1: Plots data, mean and variance in one plot 
and the acquisition function in another plot\n if self.input_dim = 2: as before but it separates the mean and variance of the model in two different plots\n :param filename: name of the file where the plot is saved\n \"\"\"\n if self.model.model is None:\n from copy import deepcopy\n model_to_plot = deepcopy(self.model)\n if self.normalize_Y:\n Y = normalize(self.Y, self.normalization_type)\n else:\n Y = self.Y\n model_to_plot.updateModel(self.X, Y, self.X, Y)\n else:\n model_to_plot = self.model\n\n return plot_acquisition(self.acquisition.space.get_bounds(),\n model_to_plot.model.X.shape[1],\n model_to_plot.model,\n model_to_plot.model.X,\n model_to_plot.model.Y,\n self.acquisition.acquisition_function,\n self.suggest_next_locations(),\n filename)\n\n def plot_convergence(self,filename=None):\n \"\"\"\n Makes twp plots to evaluate the convergence of the model:\n plot 1: Iterations vs. distance between consecutive selected x's\n plot 2: Iterations vs. the mean of the current model in the selected sample.\n :param filename: name of the file where the plot is saved\n \"\"\"\n return plot_convergence(self.X,self.Y_best,filename)\n\n def get_evaluations(self):\n return self.X.copy(), self.Y.copy()\n\n def save_report(self, report_file= None):\n \"\"\"\n Saves a report with the main results of the optimization.\n\n :param report_file: name of the file in which the results of the optimization are saved.\n \"\"\"\n\n with open(report_file,'w') as file:\n import GPyOpt\n import time\n\n file.write('-----------------------------' + ' GPyOpt Report file ' + '-----------------------------------\\n')\n file.write('GPyOpt Version ' + str(GPyOpt.__version__) + '\\n')\n file.write('Date and time: ' + time.strftime(\"%c\")+'\\n')\n if self.num_acquisitions==self.max_iter:\n file.write('Optimization completed: ' +'YES, ' + str(self.X.shape[0]).strip('[]') + ' samples collected.\\n')\n file.write('Number initial samples: ' + str(self.initial_design_numdata) +' \\n')\n else:\n file.write('Optimization completed: ' +'NO,' + str(self.X.shape[0]).strip('[]') + ' samples collected.\\n')\n file.write('Number initial samples: ' + str(self.initial_design_numdata) +' \\n')\n\n file.write('Tolerance: ' + str(self.eps) + '.\\n')\n file.write('Optimization time: ' + str(self.cum_time).strip('[]') +' seconds.\\n')\n\n file.write('\\n')\n file.write('--------------------------------' + ' Problem set up ' + '------------------------------------\\n')\n file.write('Problem name: ' + self.objective_name +'\\n')\n file.write('Problem dimension: ' + str(self.space.dimensionality) +'\\n')\n file.write('Number continuous variables ' + str(len(self.space.get_continuous_dims()) ) +'\\n')\n file.write('Number discrete variables ' + str(len(self.space.get_discrete_dims())) +'\\n')\n file.write('Number bandits ' + str(self.space.get_bandit().shape[0]) +'\\n')\n file.write('Noiseless evaluations: ' + str(self.exact_feval) +'\\n')\n file.write('Cost used: ' + self.cost.cost_type +'\\n')\n file.write('Constraints: ' + str(self.constraints==True) +'\\n')\n\n file.write('\\n')\n file.write('------------------------------' + ' Optimization set up ' + '---------------------------------\\n')\n file.write('Normalized outputs: ' + str(self.normalize_Y) + '\\n')\n file.write('Model type: ' + str(self.model_type).strip('[]') + '\\n')\n file.write('Model update interval: ' + str(self.model_update_interval) + '\\n')\n file.write('Acquisition type: ' + str(self.acquisition_type).strip('[]') + '\\n')\n file.write('Acquisition optimizer: ' + 
str(self.acquisition_optimizer.optimizer_name).strip('[]') + '\\n')\n\n file.write('Acquisition type: ' + str(self.acquisition_type).strip('[]') + '\\n')\n if hasattr(self, 'acquisition_optimizer') and hasattr(self.acquisition_optimizer, 'optimizer_name'):\n file.write('Acquisition optimizer: ' + str(self.acquisition_optimizer.optimizer_name).strip('[]') + '\\n')\n else:\n file.write('Acquisition optimizer: None\\n')\n file.write('Evaluator type (batch size): ' + str(self.evaluator_type).strip('[]') + ' (' + str(self.batch_size) + ')' + '\\n')\n file.write('Cores used: ' + str(self.num_cores) + '\\n')\n\n file.write('\\n')\n file.write('---------------------------------' + ' Summary ' + '------------------------------------------\\n')\n file.write('Value at minimum: ' + str(min(self.Y)).strip('[]') +'\\n')\n file.write('Best found minimum location: ' + str(self.X[np.argmin(self.Y),:]).strip('[]') +'\\n')\n\n file.write('----------------------------------------------------------------------------------------------\\n')\n file.close()\n\n def _write_csv(self, filename, data):\n with open(filename, 'w') as csv_file:\n writer = csv.writer(csv_file, delimiter='\\t')\n writer.writerows(data)\n\n def save_evaluations(self, evaluations_file = None):\n \"\"\"\n Saves evaluations at each iteration of the optimization\n\n :param evaluations_file: name of the file in which the results are saved.\n \"\"\"\n iterations = np.array(range(1, self.Y.shape[0] + 1))[:, None]\n results = np.hstack((iterations, self.Y, self.X))\n header = ['Iteration', 'Y'] + ['var_' + str(k) for k in range(1, self.X.shape[1] + 1)]\n\n data = [header] + results.tolist()\n self._write_csv(evaluations_file, data)\n\n def save_models(self, models_file):\n \"\"\"\n Saves model parameters at each iteration of the optimization\n\n :param models_file: name of the file or a file buffer, in which the results are saved.\n \"\"\"\n if self.model_parameters_iterations is None:\n raise ValueError(\"No iterations have been carried out yet and hence no iterations of the BO can be saved\")\n\n iterations = np.array(range(1,self.model_parameters_iterations.shape[0]+1))[:,None]\n results = np.hstack((iterations,self.model_parameters_iterations))\n header = ['Iteration'] + self.model.get_model_parameters_names()\n\n data = [header] + results.tolist()\n self._write_csv(models_file, data)\n"
] |
[
[
"numpy.hstack",
"numpy.min",
"numpy.argmin",
"numpy.sum",
"numpy.vstack"
]
] |
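The record above contains `GPyOpt/core/bo.py`, the base `BO` loop with `run_optimization`, `_compute_results`, and the report/plot helpers. A hedged sketch of driving that loop through the user-facing `GPyOpt.methods.BayesianOptimization` wrapper (a `BO` subclass) follows; the objective function, domain bounds, and iteration budget are illustrative assumptions, while `run_optimization`, `x_opt`, and `fx_opt` come directly from the code in the record.

```python
# Hedged sketch: exercising the BO loop shown above via the BayesianOptimization wrapper.
import numpy as np
import GPyOpt

def objective(x):
    # GPyOpt passes a 2d array (one row per evaluation point) and expects a 2d output.
    return np.sum(np.square(x - 0.3), axis=1, keepdims=True)

domain = [{'name': 'x1', 'type': 'continuous', 'domain': (-1, 1)},
          {'name': 'x2', 'type': 'continuous', 'domain': (-1, 1)}]

bo = GPyOpt.methods.BayesianOptimization(f=objective, domain=domain)
bo.run_optimization(max_iter=15, eps=1e-8, verbosity=True)  # BO.run_optimization above
print(bo.x_opt, bo.fx_opt)                                  # set in BO._compute_results
```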
kwilcox/codar_processing
|
[
"3a327f5378a6a9d78d263c8e7b317088823245c1"
] |
[
"codar_processing/src/waves.py"
] |
[
"import datetime as dt\nimport pandas as pd\nimport re\nimport xarray as xr\nfrom codar_processing.src.common import CTFParser\n\n\ndef concatenate_waves(wave_list):\n \"\"\"\n This function takes a list of radial files. Loads them all separately using the Wave object and then combines\n them along the time dimension using xarrays built-in concatenation routines.\n :param wave_list: list of radial files that you want to concatenate\n :return: wave files concatenated into an xarray dataset by range, bearing, and time\n \"\"\"\n\n wave_dict = {}\n for each in sorted(wave_list):\n wave = Waves(each, multi_dimensional=True)\n wave_dict[wave.file_name] = wave.ds\n\n ds = xr.concat(wave_dict.values(), 'time')\n return ds\n\n\nclass Waves(CTFParser):\n \"\"\"\n Waves Subclass.\n\n This class should be used when loading a CODAR wave (.wls) file. This class utilizes the generic LLUV class from\n ~/codar_processing/common.py in order to load CODAR wave files\n \"\"\"\n\n def __init__(self, fname, replace_invalid=True, multi_dimensional=True):\n rename = dict(datetime='time',\n MWHT='wave_height',\n MWPD='wave_period',\n WAVB='wave_bearing',\n WNDB='wind_bearing',\n PMWH='maximum_observable_wave_height',\n ACNT='cross_spectra_averaged_count',\n DIST='distance_from_origin',\n RCLL='range_cell_result',\n WDPT='doppler_points_used',\n MTHD='wave_method',\n FLAG='vector_flag',\n WHNM='num_valid_source_wave_vectors',\n WHSD='standard_deviation_of_wave_heights')\n\n CTFParser.__init__(self, fname)\n if self._tables['1']['data']['DIST'].isnull().all():\n df = self._tables['1']['data']\n self.data = df\n index = 'datetime' # define index so pd.to_xarray function will automatically assign dimension and coordinates\n else:\n data_tables = []\n for key in self._tables.keys():\n df = self._tables[key]['data']\n data_tables.append(df)\n self.data = pd.concat(data_tables, axis=0)\n index = ['datetime', 'DIST'] # define two indices for multidimensional indexing.\n\n # Use separate date and time columns to create datetime column and drop those columns.\n self.data['datetime'] = self.data[['TYRS', 'TMON', 'TDAY', 'THRS', 'TMIN', 'TSEC']].apply(lambda s: dt.datetime(*s), axis=1)\n\n if replace_invalid:\n self.replace_invalid_values()\n\n if multi_dimensional:\n # Set index of dataframe and also drop columns that we don't need to see anymore\n self.data = self.data.set_index(index).drop(['TIME', 'TYRS', 'TMON', 'TDAY', 'THRS', 'TMIN', 'TSEC'], axis=1)\n\n # Convert from pandas dataframe into an xarray dataset\n self.ds = self.data.to_xarray()\n\n # rename variables to something meaningful\n self.ds = self.ds.rename(rename)\n\n # Clean up wave header and assign header data to global attributes\n self.clean_wave_header()\n self.data = self.ds.assign_attrs(self.metadata)\n\n def clean_wave_header(self):\n \"\"\"\n Cleans the header data from the wave data for proper input into MySQL database\n \"\"\"\n\n keep = ['TimeCoverage', 'WaveMinDopplerPoints', 'AntennaBearing', 'DopplerCells', 'TransmitCenterFreqMHz',\n 'CTF', 'TableColumnTypes', 'TimeZone', 'WaveBraggPeakDropOff', 'RangeResolutionKMeters',\n 'CoastlineSector', 'WaveMergeMethod', 'RangeCells', 'WaveBraggPeakNull', 'WaveUseInnerBragg',\n 'BraggSmoothingPoints', 'Manufacturer', 'TimeStamp', 'FileType', 'TableRows', 'BraggHasSecondOrder',\n 'Origin', 'MaximumWavePeriod', 'UUID', 'WaveBraggNoiseThreshold', 'TransmitBandwidthKHz', 'Site',\n 'TransmitSweepRateHz', 'WaveBearingLimits', 'WavesFollowTheWind', 'CurrentVelocityLimit']\n\n key_list = 
list(self.metadata.keys())\n for key in key_list:\n if not key in keep:\n del self.metadata[key]\n\n for k, v in self.metadata.items():\n if 'Site' in k:\n self.metadata[k] = ''.join(e for e in v if e.isalnum())\n elif 'TimeStamp' in k:\n t_list = v.split()\n t_list = [int(s) for s in t_list]\n self.metadata[k] = dt.datetime(t_list[0], t_list[1], t_list[2], t_list[3], t_list[4], t_list[5]).strftime(\n '%Y-%m-%d %H:%M:%S')\n elif k in ('TimeCoverage', 'RangeResolutionKMeters'):\n self.metadata[k] = re.findall(\"\\d+\\.\\d+\", v)[0]\n elif k in ('WaveMergeMethod', 'WaveUseInnerBragg', 'WavesFollowTheWind'):\n self.metadata[k] = re.search(r'\\d+', v).group()\n elif 'TimeZone' in k:\n self.metadata[k] = re.search('\"(.*)\"', v).group(1)\n elif k in ('WaveBearingLimits', 'CoastlineSector'):\n bearings = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", v)\n self.metadata[k] = ', '.join(e for e in bearings)\n else:\n continue\n\n def file_type(self):\n \"\"\"Return a string representing the type of file this is.\"\"\"\n return 'wave'\n\n def flag_wave_heights(self, wave_min=0.2, wave_max=5):\n \"\"\"\n Flag bad wave heights in Wave instance. This method labels wave heights between wave_min and wave_max good,\n while labeling anything else bad\n :param wave_min: Minimum Wave Height - Waves above this will be considered good\n :param wave_max: Maximum Wave Height - Waves less than this will be considered good\n :return:\n \"\"\"\n self.data['mwht_flag'] = 1\n boolean = self.data['MWHT'].between(wave_min, wave_max, inclusive=True)\n self.data['mwht_flag'] = self.data['mwht_flag'].where(boolean, other=4)\n\n def is_valid(self):\n if self.data.empty:\n return False\n else:\n return True\n"
] |
[
[
"pandas.concat"
]
] |
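The record above defines the `Waves` CTF parser and `concatenate_waves` helper for CODAR `.wls` files. A hedged usage sketch follows: the file paths are hypothetical, `multi_dimensional=False` keeps the data as a pandas DataFrame so `flag_wave_heights` (which filters the raw `MWHT` column) applies, and `concatenate_waves` builds one xarray Dataset along the time dimension, as implemented in the record.

```python
# Hedged sketch of the Waves / concatenate_waves API shown above; paths are hypothetical.
from glob import glob
from codar_processing.src.waves import Waves, concatenate_waves

wave_files = sorted(glob('data/waves/*.wls'))     # hypothetical location of .wls files

# Single file: keep the pandas DataFrame and flag implausible wave heights.
w = Waves(wave_files[0], multi_dimensional=False)
w.flag_wave_heights(wave_min=0.2, wave_max=5)

# Many files: concatenate into one xarray Dataset along 'time'.
ds = concatenate_waves(wave_files)
print(ds)
```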
pwharned/lime
|
[
"3d320a0b2d84c230a7c0b7497bfc45a688e227d4"
] |
[
"lime/lime_tabular.py"
] |
[
"\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport copy\nfrom functools import partial\nimport json\nimport warnings\n\nimport numpy as np\nimport scipy as sp\nimport sklearn\nimport sklearn.preprocessing\nfrom sklearn.utils import check_random_state\nfrom pyDOE2 import lhs\nfrom scipy.stats.distributions import norm\n\nfrom lime.discretize import QuartileDiscretizer\nfrom lime.discretize import DecileDiscretizer\nfrom lime.discretize import EntropyDiscretizer\nfrom lime.discretize import BaseDiscretizer\nfrom lime.discretize import StatsDiscretizer\nfrom . import explanation\nfrom . import lime_base\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None,\n feature_indexes=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n feature_indexes: optional feature indexes used in the sparse case\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.feature_indexes = feature_indexes\n self.scaled_row = scaled_row\n if sp.sparse.issparse(scaled_row):\n self.all_categorical = False\n else:\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n if self.feature_indexes is not None:\n # Sparse case: only display the non-zero values and importances\n fnames = [self.exp_feature_names[i] for i in self.feature_indexes]\n fweights = [weights[i] for i in self.feature_indexes]\n if show_all:\n out_list = list(zip(fnames,\n self.feature_values,\n fweights))\n else:\n out_dict = dict(map(lambda x: (x[0], (x[1], x[2], x[3])),\n zip(self.feature_indexes,\n fnames,\n self.feature_values,\n fweights)))\n out_list = [out_dict.get(x[0], (str(x[0]), 0.0, 0.0)) for x in exp]\n else:\n out_list = list(zip(self.exp_feature_names,\n self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains 
predictions on tabular (i.e. matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n\n def __init__(self,\n training_data,\n mode=\"classification\",\n training_labels=None,\n feature_names=None,\n categorical_features=None,\n categorical_names=None,\n kernel_width=None,\n kernel=None,\n verbose=False,\n class_names=None,\n feature_selection='auto',\n discretize_continuous=True,\n discretizer='quartile',\n sample_around_instance=False,\n random_state=None,\n training_data_stats=None):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt (number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True\n and data is not sparse. Options are 'quartile', 'decile',\n 'entropy' or a BaseDiscretizer instance.\n sample_around_instance: if True, will sample continuous features\n in perturbed samples from a normal centered at the instance\n being explained. Otherwise, the normal is centered on the mean\n of the feature data.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n training_data_stats: a dict object having the details of training data\n statistics. If None, training data information will be used, only matters\n if discretize_continuous is True. 
Must have the following keys:\n means\", \"mins\", \"maxs\", \"stds\", \"feature_values\",\n \"feature_frequencies\"\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.mode = mode\n self.categorical_names = categorical_names or {}\n self.sample_around_instance = sample_around_instance\n self.training_data_stats = training_data_stats\n\n # Check and raise proper error in stats are supplied in non-descritized path\n if self.training_data_stats:\n self.validate_training_data_stats(self.training_data_stats)\n\n if categorical_features is None:\n categorical_features = []\n if feature_names is None:\n feature_names = [str(i) for i in range(training_data.shape[1])]\n\n self.categorical_features = list(categorical_features)\n self.feature_names = list(feature_names)\n\n self.discretizer = None\n if discretize_continuous and not sp.sparse.issparse(training_data):\n # Set the discretizer if training data stats are provided\n if self.training_data_stats:\n discretizer = StatsDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n data_stats=self.training_data_stats,\n random_state=self.random_state)\n\n if discretizer == 'quartile':\n self.discretizer = QuartileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'decile':\n self.discretizer = DecileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'entropy':\n self.discretizer = EntropyDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif isinstance(discretizer, BaseDiscretizer):\n self.discretizer = discretizer\n else:\n raise ValueError('''Discretizer must be 'quartile',''' +\n ''' 'decile', 'entropy' or a''' +\n ''' BaseDiscretizer instance''')\n self.categorical_features = list(range(training_data.shape[1]))\n\n # Get the discretized_training_data when the stats are not provided\n if(self.training_data_stats is None):\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n if kernel is None:\n def kernel(d, kernel_width):\n return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))\n\n kernel_fn = partial(kernel, kernel_width=kernel_width)\n\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)\n self.class_names = class_names\n\n # Though set has no role to play if training data stats are provided\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n if training_data_stats is None:\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n else:\n column = training_data[:, feature]\n\n feature_count = collections.Counter(column)\n values, frequencies = map(list, zip(*(sorted(feature_count.items()))))\n else:\n values = training_data_stats[\"feature_values\"][feature]\n frequencies = training_data_stats[\"feature_frequencies\"][feature]\n\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n float(sum(frequencies)))\n 
self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n @staticmethod\n def convert_and_round(values):\n return ['%.2f' % v for v in values]\n\n @staticmethod\n def validate_training_data_stats(training_data_stats):\n \"\"\"\n Method to validate the structure of training data stats\n \"\"\"\n stat_keys = list(training_data_stats.keys())\n valid_stat_keys = [\"means\", \"mins\", \"maxs\", \"stds\", \"feature_values\", \"feature_frequencies\"]\n missing_keys = list(set(valid_stat_keys) - set(stat_keys))\n if len(missing_keys) > 0:\n raise Exception(\"Missing keys in training_data_stats. Details: %s\" % (missing_keys))\n\n def explain_instance(self,\n data_row,\n predict_fn,\n labels=(1,),\n top_labels=None,\n num_features=10,\n num_samples=5000,\n distance_metric='euclidean',\n model_regressor=None,\n sampling_method='gaussian'):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array or scipy.sparse matrix, corresponding to a row\n predict_fn: prediction function. For classifiers, this should be a\n function that takes a numpy array and outputs prediction\n probabilities. For regressors, this takes a numpy array and\n returns the predictions. For ScikitClassifiers, this is\n `classifier.predict_proba()`. For ScikitRegressors, this\n is `regressor.predict()`. The prediction function needs to work\n on multiple feature vectors (the vectors randomly perturbed\n from the data_row).\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n sampling_method: Method to sample synthetic data. Defaults to Gaussian\n sampling. 
Can also use Latin Hypercube Sampling.\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n if sp.sparse.issparse(data_row) and not sp.sparse.isspmatrix_csr(data_row):\n # Preventative code: if sparse, convert to csr format if not in csr format already\n data_row = data_row.tocsr()\n data, inverse = self.__data_inverse(data_row, num_samples, sampling_method)\n if sp.sparse.issparse(data):\n # Note in sparse case we don't subtract mean since data would become dense\n scaled_data = data.multiply(self.scaler.scale_)\n # Multiplying with csr matrix can return a coo sparse matrix\n if not sp.sparse.isspmatrix_csr(scaled_data):\n scaled_data = scaled_data.tocsr()\n else:\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = predict_fn(inverse)\n\n # for classification, the model needs to provide a list of tuples - classes\n # along with prediction probabilities\n if self.mode == \"classification\":\n if len(yss.shape) == 1:\n raise NotImplementedError(\"LIME does not currently support \"\n \"classifier models without probability \"\n \"scores. If this conflicts with your \"\n \"use case, please let us know: \"\n \"https://github.com/datascienceinc/lime/issues/16\")\n elif len(yss.shape) == 2:\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n if not np.allclose(yss.sum(axis=1), 1.0):\n warnings.warn(\"\"\"\n Prediction probabilties do not sum to 1, and\n thus does not constitute a probability space.\n Check that you classifier outputs probabilities\n (Not log probabilities, or actual class predictions).\n \"\"\")\n else:\n raise ValueError(\"Your model outputs \"\n \"arrays with {} dimensions\".format(len(yss.shape)))\n\n # for regression, the output should be a one-dimensional array of predictions\n else:\n try:\n if len(yss.shape) != 1 and len(yss[0].shape) == 1:\n yss = np.array([v[0] for v in yss])\n assert isinstance(yss, np.ndarray) and len(yss.shape) == 1\n except AssertionError:\n raise ValueError(\"Your model needs to output single-dimensional \\\n numpyarrays, not arrays of {} dimensions\".format(yss.shape))\n\n predicted_value = yss[0]\n min_y = min(yss)\n max_y = max(yss)\n\n # add a dimension to be compatible with downstream machinery\n yss = yss[:, np.newaxis]\n\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n if sp.sparse.issparse(data_row):\n values = self.convert_and_round(data_row.data)\n feature_indexes = data_row.indices\n else:\n values = self.convert_and_round(data_row)\n feature_indexes = None\n\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n 
discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(feature_names,\n values,\n scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names,\n feature_indexes=feature_indexes)\n ret_exp = explanation.Explanation(domain_mapper,\n mode=self.mode,\n class_names=self.class_names)\n if self.mode == \"classification\":\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n else:\n ret_exp.predicted_value = predicted_value\n ret_exp.min_value = min_y\n ret_exp.max_value = max_y\n labels = [0]\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score[label],\n ret_exp.local_pred[label]) = self.base.explain_instance_with_data(\n scaled_data,\n yss,\n distances,\n label,\n num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n\n if self.mode == \"regression\":\n ret_exp.intercept[1] = ret_exp.intercept[0]\n ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]\n ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]\n\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples,\n sampling_method):\n \"\"\"Generates a neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n sampling_method: 'gaussian' or 'lhs'\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. 
The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n is_sparse = sp.sparse.issparse(data_row)\n if is_sparse:\n num_cols = data_row.shape[1]\n data = sp.sparse.csr_matrix((num_samples, num_cols), dtype=data_row.dtype)\n else:\n num_cols = data_row.shape[0]\n data = np.zeros((num_samples, num_cols))\n categorical_features = range(num_cols)\n if self.discretizer is None:\n instance_sample = data_row\n scale = self.scaler.scale_\n mean = self.scaler.mean_\n if is_sparse:\n # Perturb only the non-zero values\n non_zero_indexes = data_row.nonzero()[1]\n num_cols = len(non_zero_indexes)\n instance_sample = data_row[:, non_zero_indexes]\n scale = scale[non_zero_indexes]\n mean = mean[non_zero_indexes]\n\n if sampling_method == 'gaussian':\n data = self.random_state.normal(0, 1, num_samples * num_cols\n ).reshape(num_samples, num_cols)\n data = np.array(data)\n elif sampling_method == 'lhs':\n data = lhs(num_cols, samples=num_samples\n ).reshape(num_samples, num_cols)\n means = np.zeros(num_cols)\n stdvs = np.array([1]*num_cols)\n for i in range(num_cols):\n data[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(data[:, i])\n data = np.array(data)\n else:\n warnings.warn('''Invalid input for sampling_method.\n Defaulting to Gaussian sampling.''', UserWarning)\n data = self.random_state.normal(0, 1, num_samples * num_cols\n ).reshape(num_samples, num_cols)\n data = np.array(data)\n\n if self.sample_around_instance:\n data = data * scale + instance_sample\n else:\n data = data * scale + mean\n if is_sparse:\n if num_cols == 0:\n data = sp.sparse.csr_matrix((num_samples,\n data_row.shape[1]),\n dtype=data_row.dtype)\n else:\n indexes = np.tile(non_zero_indexes, num_samples)\n indptr = np.array(\n range(0, len(non_zero_indexes) * (num_samples + 1),\n len(non_zero_indexes)))\n data_1d_shape = data.shape[0] * data.shape[1]\n data_1d = data.reshape(data_1d_shape)\n data = sp.sparse.csr_matrix(\n (data_1d, indexes, indptr),\n shape=(num_samples, data_row.shape[1]))\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = self.random_state.choice(values, size=num_samples,\n replace=True, p=freqs).astype(float)\n binary_column = (inverse_column == first_row[column]).astype(int)\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass RecurrentTabularExplainer(LimeTabularExplainer):\n \"\"\"\n An explainer for keras-style recurrent neural networks, where the\n input shape is (n_samples, n_timesteps, n_features). 
This class\n just extends the LimeTabularExplainer class and reshapes the training\n data and feature names such that they become something like\n\n (val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)\n\n Each of the methods that take data reshape it appropriately,\n so you can pass in the training/testing data exactly as you\n would to the recurrent neural network.\n\n \"\"\"\n\n def __init__(self, training_data, mode=\"classification\",\n training_labels=None, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, kernel=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True,\n discretizer='quartile', random_state=None):\n \"\"\"\n Args:\n training_data: numpy 3d array with shape\n (n_samples, n_timesteps, n_features)\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n\n # Reshape X\n n_samples, n_timesteps, n_features = training_data.shape\n training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(\n n_samples, n_timesteps * n_features)\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n\n # Update the feature names\n feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))\n for n in feature_names for i in range(n_timesteps)]\n\n # Send off the the super class to do its magic.\n super(RecurrentTabularExplainer, self).__init__(\n training_data,\n mode=mode,\n training_labels=training_labels,\n feature_names=feature_names,\n categorical_features=categorical_features,\n categorical_names=categorical_names,\n kernel_width=kernel_width,\n kernel=kernel,\n verbose=verbose,\n class_names=class_names,\n feature_selection=feature_selection,\n discretize_continuous=discretize_continuous,\n discretizer=discretizer,\n random_state=random_state)\n\n def _make_predict_proba(self, func):\n \"\"\"\n The predict_proba method will expect 3d arrays, but we are reshaping\n them to 2D so that LIME works correctly. This wraps the function\n you give in explain_instance to first reshape the data to have\n the shape the the keras-style network expects.\n \"\"\"\n\n def predict_proba(X):\n n_samples = X.shape[0]\n new_shape = (n_samples, self.n_features, self.n_timesteps)\n X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))\n return func(X)\n\n return predict_proba\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 2d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a numpy array and outputs prediction probabilities. For\n ScikitClassifiers , this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n\n # Flatten input so that the normal explainer can handle it\n data_row = data_row.T.reshape(self.n_timesteps * self.n_features)\n\n # Wrap the classifier to reshape input\n classifier_fn = self._make_predict_proba(classifier_fn)\n return super(RecurrentTabularExplainer, self).explain_instance(\n data_row, classifier_fn,\n labels=labels,\n top_labels=top_labels,\n num_features=num_features,\n num_samples=num_samples,\n distance_metric=distance_metric,\n model_regressor=model_regressor)\n"
] |
[
[
"scipy.sparse.issparse",
"numpy.sqrt",
"scipy.sparse.isspmatrix_csr",
"numpy.tile",
"scipy.sparse.csr_matrix",
"scipy.stats.distributions.norm",
"numpy.transpose",
"numpy.exp",
"numpy.argsort",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"sklearn.utils.check_random_state"
]
] |
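The LIME tabular record above revolves around perturbing a row, scaling the neighbourhood, and weighting it with an exponential kernel over euclidean distances. As an illustrative sketch (not part of the dataset row), the snippet below re-creates that default kernel and distance weighting with the same numpy/sklearn calls listed in the record's apis column; the toy training_data array is an assumption made only for this example.

import numpy as np
import sklearn.metrics
from functools import partial

# Toy stand-in for the explainer's training data (assumption for this sketch).
training_data = np.random.RandomState(0).normal(size=(100, 4))

# Default kernel width used by the record: sqrt(number of columns) * 0.75.
kernel_width = np.sqrt(training_data.shape[1]) * 0.75

def kernel(d, kernel_width):
    # Exponential kernel mapping euclidean distances to weights in (0, 1].
    return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))

kernel_fn = partial(kernel, kernel_width=kernel_width)

# Gaussian neighbourhood around the first row, the simple analogue of what
# __data_inverse does for continuous features when no discretizer is used.
scale = training_data.std(axis=0)
mean = training_data.mean(axis=0)
data = np.random.RandomState(1).normal(0, 1, size=(10, 4)) * scale + mean
data[0] = training_data[0]

distances = sklearn.metrics.pairwise_distances(
    data, data[0].reshape(1, -1), metric="euclidean").ravel()
weights = kernel_fn(distances)  # weights[0] == 1.0: zero distance to itself

The record itself takes means and scales from a fitted StandardScaler rather than raw column statistics; the simplification here is only to keep the sketch self-contained.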
chuanli11/SynergyNet
|
[
"a8044d8dabbfb811d4299f59e64e0fb749027e86"
] |
[
"artistic.py"
] |
[
"import torch\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\nfrom utils.ddfa import ToTensor, Normalize\nfrom model_building import SynergyNet\nfrom utils.inference import crop_img, predict_denseVert\nimport argparse\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\nimport os\nimport os.path as osp\nimport glob\nfrom FaceBoxes import FaceBoxes\n\n# Following 3DDFA-V2, we also use 120x120 resolution\nIMG_SIZE = 120\n\ndef write_obj_with_colors(obj_name, vertices, triangles, colors):\n triangles = triangles.copy()\n\n if obj_name.split('.')[-1] != 'obj':\n obj_name = obj_name + '.obj'\n with open(obj_name, 'w') as f:\n for i in range(vertices.shape[1]):\n s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\\n'.format(vertices[0, i], vertices[1, i], vertices[2, i], colors[i, 2],\n colors[i, 1], colors[i, 0])\n f.write(s)\n for i in range(triangles.shape[1]):\n s = 'f {} {} {}\\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])\n f.write(s)\n\ndef main(args):\n # load pre-tained model\n checkpoint_fp = 'pretrained/best.pth.tar' \n args.arch = 'mobilenet_v2'\n args.devices_id = [0]\n\n checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']\n \n model = SynergyNet(args)\n model_dict = model.state_dict()\n\n # load BFM_UV mapping and kept indicies and deleted triangles\n uv_vert=np.load('3dmm_data/BFM_UV.npy')\n coord_u = (uv_vert[:,1]*255.0).astype(np.int32)\n coord_v = (uv_vert[:,0]*255.0).astype(np.int32)\n keep_ind = np.load('3dmm_data/keptInd.npy')\n tri_deletion = np.load('3dmm_data/deletedTri.npy')\n\n # because the model is trained by multiple gpus, prefix 'module' should be removed\n for k in checkpoint.keys():\n model_dict[k.replace('module.', '')] = checkpoint[k]\n\n model.load_state_dict(model_dict, strict=False)\n model = model.cuda()\n model.eval()\n\n # face detector\n face_boxes = FaceBoxes()\n\n # preparation\n transform = transforms.Compose([ToTensor(), Normalize(mean=127.5, std=128)])\n if osp.isdir(args.files):\n if not args.files[-1] == '/':\n args.files = args.files + '/'\n if not args.png:\n files = sorted(glob.glob(args.files+'*.jpg'))\n else:\n files = sorted(glob.glob(args.files+'*.png'))\n else:\n files = [args.files]\n\n for img_fp in files:\n print(\"Process the image: \", img_fp)\n\n img_ori = cv2.imread(img_fp)\n\n # crop faces\n rects = face_boxes(img_ori)\n\n # storage\n vertices_lst = []\n for rect in rects:\n roi_box = rect\n\n # enlarge the bbox a little and do a square crop\n HCenter = (rect[1] + rect[3])/2\n WCenter = (rect[0] + rect[2])/2\n side_len = roi_box[3]-roi_box[1]\n margin = side_len * 1.2 // 2\n roi_box[0], roi_box[1], roi_box[2], roi_box[3] = WCenter-margin, HCenter-margin, WCenter+margin, HCenter+margin\n\n img = crop_img(img_ori, roi_box)\n img = cv2.resize(img, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_LINEAR)\n \n input = transform(img).unsqueeze(0)\n with torch.no_grad():\n input = input.cuda()\n param = model.forward_test(input)\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n\n # dense pts\n vertices = predict_denseVert(param, roi_box, transform=True)\n vertices_lst.append(vertices)\n\n # textured obj file output\n if not osp.exists(f'inference_output/obj/'):\n os.makedirs(f'inference_output/obj/')\n \n name = img_fp.rsplit('/',1)[-1][:-4] # drop off the extension\n colors = cv2.imread(f'uv_art/{name}_fake_B.png',-1)\n colors = np.flip(colors,axis=0)\n colors_uv = (colors[coord_u, coord_v,:])\n\n wfp = 
f'inference_output/obj/{name}.obj'\n write_obj_with_colors(wfp, vertices[:,keep_ind], tri_deletion, colors_uv[keep_ind,:].astype(np.float32))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--files', default='', help='path to a single image or path to a folder containing multiple images')\n parser.add_argument(\"--png\", action=\"store_true\", help=\"if images are with .png extension\")\n parser.add_argument('--img_size', default=120, type=int)\n parser.add_argument('-b', '--batch-size', default=1, type=int)\n\n args = parser.parse_args()\n main(args)"
] |
[
[
"numpy.load",
"torch.no_grad",
"numpy.flip",
"torch.load"
]
] |
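The SynergyNet row above includes a common PyTorch loading pattern: a checkpoint saved from a multi-GPU (DataParallel) run is loaded by stripping the 'module.' prefix before load_state_dict. A minimal, hedged sketch of just that step follows; the nn.Linear placeholder and the in-memory checkpoint are assumptions, not the repository's actual model or file.

import torch
import torch.nn as nn

# Placeholder model; the dataset row builds SynergyNet with a mobilenet_v2 backbone.
model = nn.Linear(4, 2)

# Simulate a checkpoint saved from nn.DataParallel, whose keys carry a 'module.' prefix.
checkpoint = {"module." + k: v for k, v in model.state_dict().items()}

# Remove the prefix, as the dataset row does, then load non-strictly and switch to eval mode.
model_dict = model.state_dict()
for k in checkpoint.keys():
    model_dict[k.replace("module.", "")] = checkpoint[k]
model.load_state_dict(model_dict, strict=False)
model.eval()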
by2101/OpenLAS
|
[
"0acb30dae98ab89009a919ce86e064c943c51643"
] |
[
"src/decoder.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom six.moves import range\nimport attention\nimport math\nfrom loss import cross_entropy, uniform_label_smooth_regulerizer\nimport utils\nfrom utils import get_seq_mask_by_shape, get_seq_mask\n\nimport pdb\n\n\n\nclass RNNDecoder(torch.nn.Module):\n def __init__(self, config):\n super(RNNDecoder, self).__init__()\n self.config = config\n\n self.embed_dim = config[\"embed_dim\"]\n self.dropout_rate = config[\"dropout_rate\"]\n self.vocab_size = config[\"vocab_size\"]\n self.hidden_size = config[\"hidden_size\"]\n self.num_layers = config[\"num_layers\"]\n self.enc_dim = config[\"enc_dim\"]\n self.att_inner_dim = config[\"att_inner_dim\"]\n\n self.emb = nn.Embedding(self.vocab_size, self.embed_dim)\n self.dropout = nn.Dropout(self.dropout_rate)\n \n rnns = [torch.nn.LSTM(self.embed_dim, self.hidden_size, 1, batch_first=True)]\n \n for _ in range(self.num_layers-1):\n rnns += [torch.nn.LSTM(self.hidden_size+self.enc_dim, self.hidden_size, 1, batch_first=True)]\n \n self.rnns = torch.nn.ModuleList(rnns)\n \n self.attentions = torch.nn.ModuleList(\n [attention.DotProductAttention(self.enc_dim, self.hidden_size, self.att_inner_dim,\n math.sqrt(self.att_inner_dim)) for _ in range(self.num_layers-1)])\n \n self.output_affine = nn.Linear(self.hidden_size, self.vocab_size)\n\n def forward(self, enc_outputs, enc_lengths, src_ids, tgt_ids, label_smooth=0):\n bz = enc_outputs.shape[0]\n if bz != src_ids.shape[0]:\n raise ValueError(\"enc_outputs does not match src_ids.\")\n \n encout_max_length = enc_outputs.shape[1]\n dec_max_length = src_ids.shape[1]\n att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte() \n \n rnn_in = self.emb(src_ids)\n rnn_in = self.dropout(rnn_in)\n \n rnn = self.rnns[0]\n rnn_output, _ = rnn(rnn_in)\n \n for l in range(1, self.num_layers):\n att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks) \n rnn_in = torch.cat([rnn_output, att], dim=-1)\n rnn_in = self.dropout(rnn_in)\n rnn_output, _ = self.rnns[l](rnn_in)\n \n rnn_output = self.dropout(rnn_output)\n logits = self.output_affine(rnn_output)\n\n ce = cross_entropy(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))\n if label_smooth > 0:\n ls = uniform_label_smooth_regulerizer(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))\n loss = (1-label_smooth) * ce + label_smooth * ls\n else:\n loss = ce\n return loss\n\n def get_attention_scores(self, enc_outputs, enc_lengths, src_ids):\n bz = enc_outputs.shape[0]\n if bz != src_ids.shape[0]:\n raise ValueError(\"enc_outputs does not match src_ids.\")\n \n encout_max_length = enc_outputs.shape[1]\n dec_max_length = src_ids.shape[1]\n att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte() \n \n rnn_in = self.emb(src_ids)\n rnn_in = self.dropout(rnn_in)\n \n rnn = self.rnns[0]\n rnn_output, _ = rnn(rnn_in)\n \n att_score_list = []\n \n for l in range(1, self.num_layers):\n att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks) \n att_score_list.append(att_scores)\n rnn_in = torch.cat([rnn_output, att], dim=-1)\n rnn_in = self.dropout(rnn_in)\n rnn_output, _ = self.rnns[l](rnn_in)\n return att_score_list\n\n def zero_states(self, batch_size):\n states = []\n for _ in range(len(self.rnns)):\n states.append(None) \n return states\n \n \n def forward_step(self, enc_outputs, enc_lengths, decoder_states, src_ids):\n '''\n 
decoder_states\n src_ids: batch_size x 1 \n '''\n bz = enc_outputs.shape[0]\n if bz != src_ids.shape[0]:\n raise ValueError(\"enc_outputs does not match src_ids.\") \n encout_max_length = enc_outputs.shape[1]\n if src_ids.shape[1] != 1:\n raise ValueError('The src_ids is not for one step.')\n att_masks = (1-get_seq_mask_by_shape(encout_max_length, 1, enc_lengths).transpose(1,2)).byte() \n \n src_ids = src_ids.to(enc_outputs.device)\n \n next_states = []\n rnn_in = self.emb(src_ids)\n rnn_in = self.dropout(rnn_in)\n \n rnn = self.rnns[0]\n \n rnn_output, states = rnn(rnn_in, decoder_states[0])\n next_states.append(states)\n \n for l in range(1, self.num_layers):\n att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks) \n\n rnn_in = torch.cat([rnn_output, att], dim=-1) \n rnn_in = self.dropout(rnn_in)\n rnn_output, states = self.rnns[l](rnn_in, decoder_states[l]) \n next_states.append(states)\n \n rnn_output = self.dropout(rnn_output)\n logits = self.output_affine(rnn_output)\n log_probs = F.log_softmax(logits, dim=-1)\n return log_probs, next_states\n \n\n \n \n \nif __name__ == \"__main__\":\n # For debugging\n config = {\n \"embed_dim\": 8,\n \"vocab_size\": 128,\n \"hidden_size\": 64,\n \"num_layers\": 2,\n \"enc_dim\": 32,\n \"att_inner_dim\": 32,\n \"dropout_rate\": 0.5\n }\n\n decoder = RNNDecoder(config)\n \n enc_outputs = torch.randn(2, 20, 32)\n enc_lengths = torch.tensor([15, 16]).long()\n src_ids = torch.tensor([[1,2,3,4,5],\n [6,7,8,9,10]])\n tgt_ids = torch.tensor([[2, 3, 4, 5, 6],\n [7,8,9,10,-1]])\n\n log_probs, loss = decoder(enc_outputs, enc_lengths, src_ids, tgt_ids)\n \n states = decoder.zero_states(2)\n \n log_probs2 = [] \n states2 = [] \n for i in range(1):\n res, states = decoder.forward_step(enc_outputs, enc_lengths, states, src_ids[:, i][:, None])\n log_probs2.append(res)\n states2.append(states)\n log_probs2 = torch.cat(log_probs2, dim=1)\n\n \n\n\n\n\n\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.LSTM",
"torch.randn",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear"
]
] |
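The OpenLAS decoder row interleaves LSTM layers with dot-product attention over encoder outputs, masked by the encoder lengths. Its attention.DotProductAttention and mask helpers are only imported there, so the snippet below is an assumed minimal reconstruction of a masked scaled dot-product attention step, not the repository's actual implementation; the tensor shapes mirror the debug block at the bottom of the row.

import math
import torch
import torch.nn.functional as F

def masked_dot_product_attention(enc_outputs, queries, enc_lengths):
    # enc_outputs: (B, T_enc, D), queries: (B, T_dec, D), enc_lengths: (B,)
    d = enc_outputs.size(-1)
    scores = torch.bmm(queries, enc_outputs.transpose(1, 2)) / math.sqrt(d)  # (B, T_dec, T_enc)
    # Mask is True at padded encoder positions beyond each sequence's length.
    t_enc = enc_outputs.size(1)
    pad_mask = torch.arange(t_enc)[None, :] >= enc_lengths[:, None]          # (B, T_enc)
    scores = scores.masked_fill(pad_mask[:, None, :], float("-inf"))
    att_scores = F.softmax(scores, dim=-1)
    context = torch.bmm(att_scores, enc_outputs)                             # (B, T_dec, D)
    return att_scores, context

enc_outputs = torch.randn(2, 20, 32)
enc_lengths = torch.tensor([15, 16])
queries = torch.randn(2, 5, 32)
att_scores, context = masked_dot_product_attention(enc_outputs, queries, enc_lengths)

In the row itself the decoder hidden size (64) differs from the encoder dimension (32) and the projection happens inside DotProductAttention; the sketch assumes matching dimensions to stay short.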
Aliang-CN/cogdl
|
[
"cf594cdb3a97f45333d08c937205d1a691828a33"
] |
[
"cogdl/datasets/ogb.py"
] |
[
"import os.path as osp\n\nimport torch\n\nfrom ogb.nodeproppred import NodePropPredDataset\nfrom ogb.graphproppred import GraphPropPredDataset\n\nfrom . import register_dataset\nfrom cogdl.data import Dataset, Data, DataLoader\nfrom cogdl.utils import cross_entropy_loss, accuracy, remove_self_loops\n\n\ndef coalesce(row, col, edge_attr=None):\n row = torch.tensor(row)\n col = torch.tensor(col)\n if edge_attr is not None:\n edge_attr = torch.tensor(edge_attr)\n num = col.shape[0] + 1\n idx = torch.full((num,), -1, dtype=torch.float)\n idx[1:] = row * num + col\n mask = idx[1:] > idx[:-1]\n\n if mask.all():\n return row, col, edge_attr\n row = row[mask]\n col = col[mask]\n if edge_attr is not None:\n edge_attr = edge_attr[mask]\n return row, col, edge_attr\n\n\nclass OGBNDataset(Dataset):\n def __init__(self, root, name):\n dataset = NodePropPredDataset(name, root)\n graph, y = dataset[0]\n x = torch.tensor(graph[\"node_feat\"])\n y = torch.tensor(y.squeeze())\n row, col, edge_attr = coalesce(graph[\"edge_index\"][0], graph[\"edge_index\"][1], graph[\"edge_feat\"])\n edge_index = torch.stack([row, col], dim=0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n row = torch.cat([edge_index[0], edge_index[1]])\n col = torch.cat([edge_index[1], edge_index[0]])\n edge_index = torch.stack([row, col], dim=0)\n if edge_attr is not None:\n edge_attr = torch.cat([edge_attr, edge_attr], dim=0)\n\n self.data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n self.data.num_nodes = graph[\"num_nodes\"]\n assert self.data.num_nodes == self.data.x.shape[0]\n\n # split\n split_index = dataset.get_idx_split()\n self.data.train_mask = torch.zeros(self.data.num_nodes, dtype=torch.bool)\n self.data.test_mask = torch.zeros(self.data.num_nodes, dtype=torch.bool)\n self.data.val_mask = torch.zeros(self.data.num_nodes, dtype=torch.bool)\n self.data.train_mask[split_index[\"train\"]] = True\n self.data.test_mask[split_index[\"test\"]] = True\n self.data.val_mask[split_index[\"valid\"]] = True\n\n self.transform = None\n\n def get(self, idx):\n assert idx == 0\n return self.data\n\n def get_loss_fn(self):\n return cross_entropy_loss\n\n def get_evaluator(self):\n return accuracy\n\n\n@register_dataset(\"ogbn-arxiv\")\nclass OGBArxivDataset(OGBNDataset):\n def __init__(self):\n dataset = \"ogbn-arxiv\"\n path = \"data\"\n super(OGBArxivDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbn-products\")\nclass OGBProductsDataset(OGBNDataset):\n def __init__(self):\n dataset = \"ogbn-products\"\n path = \"data\"\n super(OGBProductsDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbn-proteins\")\nclass OGBProteinsDataset(OGBNDataset):\n def __init__(self):\n dataset = \"ogbn-proteins\"\n path = \"data\"\n super(OGBProteinsDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbn-mag\")\nclass OGBMAGDataset(OGBNDataset):\n def __init__(self):\n dataset = \"ogbn-mag\"\n path = \"data\"\n super(OGBMAGDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbn-papers100M\")\nclass OGBPapers100MDataset(OGBNDataset):\n def __init__(self):\n dataset = \"ogbn-papers100M\"\n path = \"data\"\n super(OGBPapers100MDataset, self).__init__(path, dataset)\n\n\nclass OGBGDataset(Dataset):\n def __init__(self, root, name):\n self.name = name\n self.dataset = GraphPropPredDataset(self.name, root)\n\n self.graphs = []\n self.all_nodes = 0\n self.all_edges = 0\n for i in range(len(self.dataset.graphs)):\n graph, label = self.dataset[i]\n data = Data(\n 
x=torch.tensor(graph[\"node_feat\"], dtype=torch.float),\n edge_index=torch.tensor(graph[\"edge_index\"]),\n edge_attr=None if \"edge_feat\" not in graph else torch.tensor(graph[\"edge_feat\"], dtype=torch.float),\n y=torch.tensor(label),\n )\n data.num_nodes = graph[\"num_nodes\"]\n self.graphs.append(data)\n\n self.all_nodes += graph[\"num_nodes\"]\n self.all_edges += graph[\"edge_index\"].shape[1]\n\n self.transform = None\n\n def get_loader(self, args):\n split_index = self.dataset.get_idx_split()\n train_loader = DataLoader(self.get_subset(split_index[\"train\"]), batch_size=args.batch_size, shuffle=True)\n valid_loader = DataLoader(self.get_subset(split_index[\"valid\"]), batch_size=args.batch_size, shuffle=False)\n test_loader = DataLoader(self.get_subset(split_index[\"test\"]), batch_size=args.batch_size, shuffle=False)\n return train_loader, valid_loader, test_loader\n\n def get_subset(self, subset):\n datalist = []\n for idx in subset:\n datalist.append(self.graphs[idx])\n return datalist\n\n def get(self, idx):\n return self.graphs[idx]\n\n @property\n def num_classes(self):\n return int(self.dataset.num_classes)\n\n\n@register_dataset(\"ogbg-molbace\")\nclass OGBMolbaceDataset(OGBGDataset):\n def __init__(self):\n dataset = \"ogbg-molbace\"\n path = \"data\"\n super(OGBMolbaceDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbg-molhiv\")\nclass OGBMolhivDataset(OGBGDataset):\n def __init__(self):\n dataset = \"ogbg-molhiv\"\n path = \"data\"\n super(OGBMolhivDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbg-molpcba\")\nclass OGBMolpcbaDataset(OGBGDataset):\n def __init__(self):\n dataset = \"ogbg-molpcba\"\n path = \"data\"\n super(OGBMolpcbaDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbg-ppa\")\nclass OGBPpaDataset(OGBGDataset):\n def __init__(self):\n dataset = \"ogbg-ppa\"\n path = \"data\"\n super(OGBPpaDataset, self).__init__(path, dataset)\n\n\n@register_dataset(\"ogbg-code\")\nclass OGBCodeDataset(OGBGDataset):\n def __init__(self):\n dataset = \"ogbg-code\"\n path = \"data\"\n super(OGBCodeDataset, self).__init__(path, dataset)\n"
] |
[
[
"torch.full",
"torch.zeros",
"torch.cat",
"torch.tensor",
"torch.stack"
]
] |
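The cogdl OGB row is mostly tensor plumbing: coalesce an edge list, drop self-loops, symmetrise the graph with torch.cat/torch.stack, and build boolean split masks. As a small illustrative sketch (the three-edge toy graph is an assumption), the same symmetrisation and mask construction looks like this:

import torch

# Toy directed edge list for the sketch: 0->1, 1->2, 2->0.
row = torch.tensor([0, 1, 2])
col = torch.tensor([1, 2, 0])
edge_index = torch.stack([row, col], dim=0)

# Make the graph undirected by appending the reversed edges, as the row does
# after remove_self_loops.
row_sym = torch.cat([edge_index[0], edge_index[1]])
col_sym = torch.cat([edge_index[1], edge_index[0]])
edge_index = torch.stack([row_sym, col_sym], dim=0)   # shape (2, 6)

# Boolean train/val/test masks over the nodes, filled from index lists.
num_nodes = 3
train_mask = torch.zeros(num_nodes, dtype=torch.bool)
train_mask[torch.tensor([0, 1])] = True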
hobogalaxy/ISA
|
[
"aae4b9b4e30d07a4fe2ddc0900973bf134cc8fda"
] |
[
"Python/PPO.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.utils.data import SubsetRandomSampler, BatchSampler\nfrom torch.optim import Adam\nimport wandb\nimport numpy as np\nfrom mlagents_envs.environment import UnityEnvironment\nfrom utils import EnvWrapper, Memory\nfrom models import Actor, Critic\nfrom collections import deque\nimport time\n\n\nprint(\"Press play in Unity.\")\n# This is a non-blocking call that only loads the environment.\nenv = UnityEnvironment()\nenv = EnvWrapper(env)\nprint(env.behavior_name)\n\n\nmodel_upload_frequency = 30_000\n\nwandb.init(entity=\"rl-cars\", project=\"ISA_mlagents\", group=None, job_type=\"eval\")\nconfig = wandb.config\n\nconfig.behavior_name = env.behavior_name\nconfig.gamma = 0.99\nconfig.lamb = 0.95\nconfig.batch_size = 64\nconfig.memory_size = 6000\nconfig.hidden_size = 256\nconfig.actor_lr = 0.0003\nconfig.critic_lr = 0.0003\nconfig.ppo_multiple_epochs = 5\nconfig.eps = 0.2\nconfig.grad_clip_norm = 0.5\nconfig.entropy_weight = 0.0\nconfig.max_steps = 15_000_000\nconfig.num_of_agents = len(env.agent_ids)\nconfig.obs_space = env.state_size\nconfig.action_space = env.action_size\nconfig.action_high = 1\nconfig.action_low = -1\n\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = torch.device('cpu')\nconfig.device = device.type\nprint(device)\n\n\nactor = Actor(obs_space=config.obs_space, action_space=config.action_space, hidden_size=config.hidden_size).to(device)\ncritic = Critic(obs_space=config.obs_space, hidden_size=config.hidden_size).to(device)\n# actor.load_state_dict(torch.load('actor_model.h5'))\n# critic.load_state_dict(torch.load('critic_model.h5'))\n\nwandb.watch(actor)\nwandb.watch(critic)\n\noptimizer_actor = Adam(actor.parameters(), lr=config.actor_lr)\noptimizer_critic = Adam(critic.parameters(), lr=config.critic_lr)\nmemory = Memory(env.agent_ids)\n\n\ndef compute_GAE(rewards, state_values, done, gamma, lamb):\n \"\"\"\n Computes Generalized Advantage Estimations.\n \"\"\"\n returns = [rewards[-1] + state_values[-1]]\n running_sum = rewards[-1] - state_values[-1]\n for i in reversed(range(len(rewards) - 1)):\n mask = 0 if done[i+1] else 1\n delta = rewards[i] + gamma * state_values[i+1] * mask - state_values[i]\n running_sum = delta + gamma * lamb * running_sum * mask\n returns.insert(0, running_sum + state_values[i])\n\n returns = torch.as_tensor(returns, dtype=torch.float32)\n return returns\n\n\ndef compute_r2g(rewards, done, gamma):\n \"\"\"\n Computes discounted rewards-to-go.\n \"\"\"\n rewards2go = []\n running_sum = 0\n for r, is_terminal in zip(reversed(rewards), reversed(done)):\n running_sum = r + gamma * running_sum * (1 - is_terminal)\n rewards2go.insert(0, running_sum)\n\n rewards2go = torch.as_tensor(rewards2go, dtype=torch.float32)\n return rewards2go\n\n\ndef compute_loss(states, actions, rewards_to_go, adv, old_log_probs):\n\n # adv = (adv - adv.mean()) / (adv.std() + 1e-5)\n # rewards_to_go = (rewards_to_go - rewards_to_go.mean()) / (rewards_to_go.std() + 1e-5)\n\n # compute critic loss\n v = critic(states)\n critic_loss = (rewards_to_go - v).pow(2)\n\n # compute actor loss\n _, _, pi = actor(states)\n log_probs = pi.log_prob(actions).sum(1)\n ratio = torch.exp(log_probs - old_log_probs) # exp(log_prob - old_log_prob) = (prob / old_prob)\n clip = torch.clamp(ratio, 1 - config.eps, 1 + config.eps)\n actor_loss = -torch.min(ratio * adv, clip * adv)\n\n # compute entropy\n entropy = pi.entropy().sum(1)\n actor_loss -= config.entropy_weight * entropy\n\n return actor_loss.mean(), 
critic_loss.mean(), entropy.mean(), ratio.mean()\n\n\ndef update():\n start = time.time()\n\n # compute rewards-to-go\n rewards_to_go = [compute_r2g(memory.rewards[id], memory.dones[id], gamma=config.gamma) for id in env.agent_ids]\n rewards_to_go = torch.cat(rewards_to_go).detach().to(device)\n\n # prepare data\n states = [torch.squeeze(torch.stack(memory.states[id]), 1) for id in env.agent_ids]\n states = torch.cat(states).detach().to(device)\n\n actions = [torch.squeeze(torch.stack(memory.actions[id]), 1) for id in env.agent_ids]\n actions = torch.cat(actions).detach().to(device)\n\n old_log_probs = [torch.squeeze(torch.stack(memory.log_probs[id]), 1) for id in env.agent_ids]\n old_log_probs = torch.cat(old_log_probs).detach().to(device)\n\n # state_values = torch.squeeze(critic(states).detach().to(device))\n\n # compute state values\n for id in env.agent_ids:\n memory.state_values[id] = critic(torch.stack(memory.states[id]))\n state_values = torch.squeeze(torch.cat([memory.state_values[id] for id in env.agent_ids])).detach().to(device)\n\n # # compute GAE\n # gae_returns = [compute_GAE(\n # rewards=memory.rewards[id],\n # state_values=memory.state_values[id],\n # done=memory.dones[id],\n # gamma=config.gamma,\n # lamb=config.lamb\n # ) for id in env.agent_ids]\n # gae_returns = torch.cat(gae_returns).detach().to(device)\n # adv = (gae_returns - state_values).detach().to(device)\n\n # normalize rewards-to-go\n rewards_to_go = (rewards_to_go - rewards_to_go.mean()) / (rewards_to_go.std() + 1e-5)\n\n # compute advantage estimations\n adv = (rewards_to_go - state_values)\n\n adv = (adv - adv.mean()) / (adv.std() + 1e-5)\n\n # rewards_to_go = gae_returns\n\n # learn\n for _ in range(config.ppo_multiple_epochs):\n\n # create sampler\n sampler = SubsetRandomSampler(range(memory.size()))\n batch_sampler = BatchSampler(sampler, batch_size=config.batch_size, drop_last=False)\n\n # execute epoch\n for indices in batch_sampler:\n\n batch_states = states[indices]\n batch_actions = actions[indices]\n batch_rewards_to_go = rewards_to_go[indices]\n batch_adv = adv[indices]\n batch_old_log_probs = old_log_probs[indices]\n\n actor_loss, critic_loss, _, _ = compute_loss(\n batch_states,\n batch_actions,\n batch_rewards_to_go,\n batch_adv,\n batch_old_log_probs\n )\n\n # update critic\n optimizer_critic.zero_grad()\n critic_loss.backward()\n nn.utils.clip_grad_norm_(critic.parameters(), config.grad_clip_norm)\n optimizer_critic.step()\n\n # update actor\n optimizer_actor.zero_grad()\n actor_loss.backward()\n nn.utils.clip_grad_norm_(actor.parameters(), config.grad_clip_norm)\n optimizer_actor.step()\n\n # log stats\n actor_loss, critic_loss, entropy, ratio = compute_loss(states, actions, rewards_to_go, adv, old_log_probs)\n end = time.time()\n wandb.log({\n \"actor loss\": actor_loss,\n \"critic loss\": critic_loss,\n \"ppo prob ratio\": ratio,\n \"entropy\": entropy,\n \"loss computation time\": end - start\n })\n\n\ndef train(max_steps=1_000_000):\n ep_rewards = deque(maxlen=200)\n ep_lengths = deque(maxlen=200)\n\n decision_steps = env.reset()\n\n step = 0\n total_num_of_games = 0\n last_uploaded_model_avg_rew = 0\n last_states = {}\n last_actions = {}\n last_log_probs = {}\n ep_rewards_log = {id: [] for id in decision_steps.agent_id}\n ep_length_log = {id: 0 for id in decision_steps.agent_id}\n while step < max_steps:\n\n for id in decision_steps.agent_id:\n last_states[id] = torch.tensor(decision_steps[id].obs[0])\n\n # Generate an action for all agents\n states = decision_steps.obs[0]\n if 
states.shape[0] == 0:\n actions = env.get_random_actions()\n actions = torch.FloatTensor(actions)\n else:\n states = torch.as_tensor(states, dtype=torch.float32).to(device)\n actions, log_probs, pi = actor(states)\n\n for i, id in enumerate(decision_steps.agent_id):\n last_actions[id] = actions[i]\n last_log_probs[id] = log_probs[i]\n\n # Move the simulation forward\n new_decision_steps, terminal_steps = env.step(np.clip(actions.cpu().numpy(), -1, 1))\n\n for id in terminal_steps.agent_id:\n memory.states[id].append(last_states[id])\n memory.actions[id].append(last_actions[id])\n memory.rewards[id].append(terminal_steps[id].reward)\n memory.dones[id].append(True)\n memory.log_probs[id].append(last_log_probs[id])\n\n ep_rewards_log[id].append(terminal_steps[id].reward)\n ep_rewards.append(sum(ep_rewards_log[id]))\n ep_rewards_log[id].clear()\n ep_length_log[id] += 1\n ep_lengths.append(ep_length_log[id])\n ep_length_log[id] = 0\n total_num_of_games += 1\n\n for id in new_decision_steps.agent_id:\n if id in terminal_steps:\n continue\n memory.states[id].append(last_states[id])\n memory.actions[id].append(last_actions[id])\n memory.rewards[id].append(new_decision_steps[id].reward)\n memory.dones[id].append(False)\n memory.log_probs[id].append(last_log_probs[id])\n\n ep_rewards_log[id].append(new_decision_steps[id].reward)\n ep_length_log[id] += 1\n\n step += 1\n\n if step % model_upload_frequency == 0:\n upload_model()\n print(\"Model uploaded\")\n\n decision_steps = new_decision_steps\n\n if memory.size() >= config.memory_size or step >= max_steps:\n\n avg_reward = sum(ep_rewards) / len(ep_rewards)\n avg_ep_length = sum(ep_lengths) / len(ep_lengths)\n\n if last_uploaded_model_avg_rew < avg_reward:\n save_model()\n last_uploaded_model_avg_rew = avg_reward\n\n update()\n memory.reset()\n\n print(f\"Step: {step}, Avg reward: {avg_reward:.2f}\")\n\n wandb.log({\n \"total number of steps\": step,\n \"total number of games\": total_num_of_games,\n \"avg reward\": avg_reward,\n \"avg episode length\": avg_ep_length,\n\n }, step=step)\n\n\ndef save_model():\n torch.save(actor.state_dict(), \"actor_model.h5\")\n torch.save(critic.state_dict(), \"critic_model.h5\")\n\n\ndef upload_model():\n wandb.save('actor_model.h5')\n wandb.save('critic_model.h5')\n\n\ndef main():\n train(max_steps=config.max_steps)\n\n\nif __name__ == \"__main__\":\n main()\n env.close()\n"
] |
[
[
"torch.cat",
"torch.min",
"torch.stack",
"torch.tensor",
"torch.exp",
"torch.FloatTensor",
"torch.device",
"torch.clamp",
"torch.utils.data.BatchSampler",
"torch.as_tensor"
]
] |
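The heart of the PPO row above is the clipped surrogate objective assembled in compute_loss. The sketch below isolates just that computation; the random tensors stand in for real log-probabilities and advantages and are assumptions made only for the example.

import torch

eps = 0.2  # clip range, matching config.eps in the dataset row

def clipped_surrogate(log_probs, old_log_probs, adv):
    # exp(log_prob - old_log_prob) equals new_prob / old_prob.
    ratio = torch.exp(log_probs - old_log_probs)
    clipped = torch.clamp(ratio, 1 - eps, 1 + eps)
    # Take the pessimistic of the two surrogate terms and negate it for minimisation.
    return -torch.min(ratio * adv, clipped * adv).mean()

log_probs = torch.randn(64)
old_log_probs = log_probs.detach() + 0.01 * torch.randn(64)
adv = torch.randn(64)
loss = clipped_surrogate(log_probs, old_log_probs, adv)

The row additionally subtracts an entropy bonus (weighted by config.entropy_weight) and trains the critic on normalised rewards-to-go; those pieces are omitted here for brevity.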
mdiephuis/Berkeley-cs294-112
|
[
"99559e046b635ca8d229f19ca4ad45c2c02a1c01"
] |
[
"hw1/utils.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\n\n# Model definition\nclass BehaviorClone(nn.Module):\n def __init__(self, input_shape, output_shape):\n super(BehaviorClone, self).__init__()\n self.input_shape = input_shape\n self.output_shape = output_shape\n self.fc1 = nn.Linear(input_shape, input_shape // 2)\n self.fc2 = nn.Linear(input_shape // 2, output_shape)\n self.do = nn.Dropout(p=0.3)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.do(x)\n x = self.fc2(x)\n return x\n\n\ndef train_valid(model, loader, optimizer, loss_fn, train):\n\n # train / valid loop\n model.train() if train else model.eval()\n batch_loss = 0\n for batch_idx, (x, y) in enumerate(loader):\n batch_size = x.size(0)\n loss = 0\n if train:\n optimizer.zero_grad()\n\n y_hat = model(x)\n loss = loss_fn(y_hat, y)\n batch_loss += loss.item() / batch_size\n\n if train:\n loss.backward()\n optimizer.step()\n\n batch_loss /= (batch_idx + 1)\n return batch_loss\n\n\ndef save_checkpoint(state, filename):\n torch.save(state, filename)\n\n\ndef init_weights(module):\n for m in module.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose2d):\n init.xavier_uniform_(m.weight.data)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias, 0.0)\n elif isinstance(m, nn.Sequential):\n for sub_mod in m:\n init_weights(sub_mod)\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.save"
]
] |
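The behaviour-cloning row above ends with a hand-rolled recursive weight initialiser. A short, hedged sketch of the same Xavier initialisation using nn.Module.apply, which PyTorch provides for exactly this kind of submodule traversal, is shown below; the two-layer placeholder network only mimics the row's fc1 -> dropout -> fc2 shape and is not the repository's model.

import torch.nn as nn
from torch.nn import init

def init_weights(m):
    # Xavier-uniform weights and zero biases for linear / transposed-conv layers.
    if isinstance(m, (nn.Linear, nn.ConvTranspose2d)):
        init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            init.constant_(m.bias, 0.0)

model = nn.Sequential(nn.Linear(8, 4), nn.Dropout(p=0.3), nn.Linear(4, 2))
model.apply(init_weights)  # .apply recurses over every submodule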
natemcintosh/polars
|
[
"73e5a335d93c1ed85ba661fffeb48a479becb810"
] |
[
"py-polars/polars/internals/frame.py"
] |
[
"\"\"\"\nModule containing logic related to eager DataFrames\n\"\"\"\nimport os\nimport sys\nfrom datetime import datetime, timedelta\nfrom io import BytesIO, StringIO\nfrom pathlib import Path\nfrom typing import (\n Any,\n BinaryIO,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n TextIO,\n Tuple,\n Type,\n Union,\n overload,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\nimport numpy as np\n\ntry:\n import pyarrow as pa\n import pyarrow.compute\n import pyarrow.parquet\n\n _PYARROW_AVAILABLE = True\nexcept ImportError: # pragma: no cover\n _PYARROW_AVAILABLE = False\n\nfrom polars import internals as pli\nfrom polars.internals.construction import (\n arrow_to_pydf,\n dict_to_pydf,\n numpy_to_pydf,\n pandas_to_pydf,\n sequence_to_pydf,\n series_to_pydf,\n)\n\ntry:\n from polars.polars import PyDataFrame, PySeries\n\n _DOCUMENTING = False\nexcept ImportError: # pragma: no cover\n _DOCUMENTING = True\n\nfrom polars._html import NotebookFormatter\nfrom polars.datatypes import Boolean, DataType, Datetime, UInt32, py_type_to_dtype\nfrom polars.utils import (\n _process_null_values,\n handle_projection_columns,\n is_int_sequence,\n is_str_sequence,\n range_to_slice,\n)\n\ntry:\n import pandas as pd\n\n _PANDAS_AVAILABLE = True\nexcept ImportError: # pragma: no cover\n _PANDAS_AVAILABLE = False\n\n\ndef wrap_df(df: \"PyDataFrame\") -> \"DataFrame\":\n return DataFrame._from_pydf(df)\n\n\ndef _prepare_other_arg(other: Any) -> \"pli.Series\":\n # if not a series create singleton series such that it will broadcast\n if not isinstance(other, pli.Series):\n if isinstance(other, str):\n pass\n elif isinstance(other, Sequence):\n raise ValueError(\"Operation not supported.\")\n\n other = pli.Series(\"\", [other])\n return other\n\n\nclass DataFrame:\n \"\"\"\n A DataFrame is a two-dimensional data structure that represents data as a table\n with rows and columns.\n\n Parameters\n ----------\n data : dict, Sequence, ndarray, Series, or pandas.DataFrame\n Two-dimensional data in various forms. dict must contain Sequences.\n Sequence may contain Series or other Sequences.\n columns : Sequence of str, default None\n Column labels to use for resulting DataFrame. If specified, overrides any\n labels already present in the data. Must match data dimensions.\n orient : {'col', 'row'}, default None\n Whether to interpret two-dimensional data as columns or as rows. If None,\n the orientation is inferred by matching the columns and data dimensions. If\n this does not yield conclusive results, column orientation is used.\n\n Examples\n --------\n Constructing a DataFrame from a dictionary:\n\n >>> data = {\"a\": [1, 2], \"b\": [3, 4]}\n >>> df = pl.DataFrame(data)\n >>> df\n shape: (2, 2)\n ┌─────┬─────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ i64 ┆ i64 │\n ╞═════╪═════╡\n │ 1 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 4 │\n └─────┴─────┘\n\n Notice that the dtype is automatically inferred as a polars Int64:\n\n >>> df.dtypes\n [<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Int64'>]\n\n In order to specify dtypes for your columns, initialize the DataFrame with a list\n of Series instead:\n\n >>> data = [\n ... pl.Series(\"col1\", [1, 2], dtype=pl.Float32),\n ... pl.Series(\"col2\", [3, 4], dtype=pl.Int64),\n ... 
]\n >>> df2 = pl.DataFrame(data)\n >>> df2\n shape: (2, 2)\n ┌──────┬──────┐\n │ col1 ┆ col2 │\n │ --- ┆ --- │\n │ f32 ┆ i64 │\n ╞══════╪══════╡\n │ 1 ┆ 3 │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ 4 │\n └──────┴──────┘\n\n Constructing a DataFrame from a numpy ndarray, specifying column names:\n\n >>> import numpy as np\n >>> data = np.array([(1, 2), (3, 4)], dtype=np.int64)\n >>> df3 = pl.DataFrame(data, columns=[\"a\", \"b\"], orient=\"col\")\n >>> df3\n shape: (2, 2)\n ┌─────┬─────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ i64 ┆ i64 │\n ╞═════╪═════╡\n │ 1 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 4 │\n └─────┴─────┘\n\n Constructing a DataFrame from a list of lists, row orientation inferred:\n\n >>> data = [[1, 2, 3], [4, 5, 6]]\n >>> df4 = pl.DataFrame(data, columns=[\"a\", \"b\", \"c\"])\n >>> df4\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ i64 │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 2 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 4 ┆ 5 ┆ 6 │\n └─────┴─────┴─────┘\n\n \"\"\"\n\n def __init__(\n self,\n data: Optional[\n Union[\n Dict[str, Sequence[Any]],\n Sequence[Any],\n np.ndarray,\n \"pa.Table\",\n \"pd.DataFrame\",\n \"pli.Series\",\n ]\n ] = None,\n columns: Optional[Sequence[str]] = None,\n orient: Optional[str] = None,\n ):\n if data is None:\n self._df = dict_to_pydf({}, columns=columns)\n\n elif isinstance(data, dict):\n self._df = dict_to_pydf(data, columns=columns)\n\n elif isinstance(data, np.ndarray):\n self._df = numpy_to_pydf(data, columns=columns, orient=orient)\n\n elif _PYARROW_AVAILABLE and isinstance(data, pa.Table):\n self._df = arrow_to_pydf(data, columns=columns)\n\n elif isinstance(data, Sequence) and not isinstance(data, str):\n self._df = sequence_to_pydf(data, columns=columns, orient=orient)\n\n elif isinstance(data, pli.Series):\n self._df = series_to_pydf(data, columns=columns)\n\n elif _PANDAS_AVAILABLE and isinstance(data, pd.DataFrame):\n if not _PYARROW_AVAILABLE:\n raise ImportError( # pragma: no cover\n \"'pyarrow' is required for converting a pandas DataFrame to a polars DataFrame.\"\n )\n self._df = pandas_to_pydf(data, columns=columns)\n\n else:\n raise ValueError(\"DataFrame constructor not called properly.\")\n\n @classmethod\n def _from_pydf(cls, py_df: \"PyDataFrame\") -> \"DataFrame\":\n \"\"\"\n Construct Polars DataFrame from FFI PyDataFrame object.\n \"\"\"\n df = cls.__new__(cls)\n df._df = py_df\n return df\n\n @classmethod\n def _from_dicts(cls, data: Sequence[Dict[str, Any]]) -> \"DataFrame\":\n pydf = PyDataFrame.read_dicts(data)\n return DataFrame._from_pydf(pydf)\n\n @classmethod\n def _from_dict(\n cls,\n data: Dict[str, Sequence[Any]],\n columns: Optional[Sequence[str]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Construct a DataFrame from a dictionary of sequences.\n\n Parameters\n ----------\n data : dict of sequences\n Two-dimensional data represented as a dictionary. dict must contain\n Sequences.\n columns : Sequence of str, default None\n Column labels to use for resulting DataFrame. If specified, overrides any\n labels already present in the data. 
Must match data dimensions.\n\n Returns\n -------\n DataFrame\n \"\"\"\n return cls._from_pydf(dict_to_pydf(data, columns=columns))\n\n @classmethod\n def _from_records(\n cls,\n data: Union[np.ndarray, Sequence[Sequence[Any]]],\n columns: Optional[Sequence[str]] = None,\n orient: Optional[str] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Construct a DataFrame from a numpy ndarray or sequence of sequences.\n\n Parameters\n ----------\n data : numpy ndarray or Sequence of sequences\n Two-dimensional data represented as numpy ndarray or sequence of sequences.\n columns : Sequence of str, default None\n Column labels to use for resulting DataFrame. Must match data dimensions.\n If not specified, columns will be named `column_0`, `column_1`, etc.\n orient : {'col', 'row'}, default None\n Whether to interpret two-dimensional data as columns or as rows. If None,\n the orientation is inferred by matching the columns and data dimensions. If\n this does not yield conclusive results, column orientation is used.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if isinstance(data, np.ndarray):\n pydf = numpy_to_pydf(data, columns=columns, orient=orient)\n else:\n pydf = sequence_to_pydf(data, columns=columns, orient=orient)\n return cls._from_pydf(pydf)\n\n @classmethod\n def _from_arrow(\n cls,\n data: \"pa.Table\",\n columns: Optional[Sequence[str]] = None,\n rechunk: bool = True,\n ) -> \"DataFrame\":\n \"\"\"\n Construct a DataFrame from an Arrow table.\n\n This operation will be zero copy for the most part. Types that are not\n supported by Polars may be cast to the closest supported type.\n\n Parameters\n ----------\n data : numpy ndarray or Sequence of sequences\n Two-dimensional data represented as Arrow table.\n columns : Sequence of str, default None\n Column labels to use for resulting DataFrame. Must match data dimensions.\n If not specified, existing Array table columns are used, with missing names\n named as `column_0`, `column_1`, etc.\n rechunk : bool, default True\n Make sure that all data is contiguous.\n\n Returns\n -------\n DataFrame\n \"\"\"\n return cls._from_pydf(arrow_to_pydf(data, columns=columns, rechunk=rechunk))\n\n @classmethod\n def _from_pandas(\n cls,\n data: \"pd.DataFrame\",\n columns: Optional[Sequence[str]] = None,\n rechunk: bool = True,\n nan_to_none: bool = True,\n ) -> \"DataFrame\":\n \"\"\"\n Construct a Polars DataFrame from a pandas DataFrame.\n\n Parameters\n ----------\n data : pandas DataFrame\n Two-dimensional data represented as a pandas DataFrame.\n columns : Sequence of str, default None\n Column labels to use for resulting DataFrame. If specified, overrides any\n labels already present in the data. 
Must match data dimensions.\n rechunk : bool, default True\n Make sure that all data is contiguous.\n nan_to_none : bool, default True\n If data contains NaN values PyArrow will convert the NaN to None\n\n Returns\n -------\n DataFrame\n \"\"\"\n return cls._from_pydf(\n pandas_to_pydf(\n data, columns=columns, rechunk=rechunk, nan_to_none=nan_to_none\n )\n )\n\n @staticmethod\n def read_csv(\n file: Union[str, BinaryIO, bytes],\n has_header: bool = True,\n columns: Optional[Union[List[int], List[str]]] = None,\n sep: str = \",\",\n comment_char: Optional[str] = None,\n quote_char: Optional[str] = r'\"',\n skip_rows: int = 0,\n dtypes: Optional[\n Union[Mapping[str, Type[DataType]], List[Type[DataType]]]\n ] = None,\n null_values: Optional[Union[str, List[str], Dict[str, str]]] = None,\n ignore_errors: bool = False,\n parse_dates: bool = False,\n n_threads: Optional[int] = None,\n infer_schema_length: Optional[int] = 100,\n batch_size: int = 8192,\n n_rows: Optional[int] = None,\n encoding: str = \"utf8\",\n low_memory: bool = False,\n rechunk: bool = True,\n ) -> \"DataFrame\":\n \"\"\"\n Read a CSV file into a Dataframe.\n\n Parameters\n ----------\n file\n Path to a file or file like object.\n has_header\n Indicate if the first row of dataset is a header or not.\n If set to False, column names will be autogenrated in the\n following format: ``column_x``, with ``x`` being an\n enumeration over every column in the dataset starting at 1.\n columns\n Columns to select. Accepts a list of column indices (starting\n at zero) or a list of column names.\n sep\n Character to use as delimiter in the file.\n comment_char\n Character that indicates the start of a comment line, for\n instance ``#``.\n quote_char\n Single byte character used for csv quoting, default = ''.\n Set to None to turn off special handling and escaping of quotes.\n skip_rows\n Start reading after ``skip_rows`` lines.\n dtypes\n Overwrite dtypes during inference.\n null_values\n Values to interpret as null values. You can provide a:\n - ``str``: All values equal to this string will be null.\n - ``List[str]``: A null value per column.\n - ``Dict[str, str]``: A dictionary that maps column name to a\n null value string.\n ignore_errors\n Try to keep reading lines if some lines yield errors.\n First try ``infer_schema_length=0`` to read all columns as\n ``pl.Utf8`` to check which values might cause an issue.\n parse_dates\n Try to automatically parse dates. 
If this does not succeed,\n the column remains of data type ``pl.Utf8``.\n n_threads\n Number of threads to use in csv parsing.\n Defaults to the number of physical cpu's of your system.\n infer_schema_length\n Maximum number of lines to read to infer schema.\n If set to 0, all columns will be read as ``pl.Utf8``.\n If set to ``None``, a full table scan will be done (slow).\n batch_size\n Number of lines to read into the buffer at once.\n Modify this to change performance.\n n_rows\n Stop reading from CSV file after reading ``n_rows``.\n During multi-threaded parsing, an upper bound of ``n_rows``\n rows cannot be guaranteed.\n encoding\n Allowed encodings: ``utf8`` or ``utf8-lossy``.\n Lossy means that invalid utf8 values are replaced with ``�``\n characters.\n low_memory\n Reduce memory usage at expense of performance.\n rechunk\n Make sure that all columns are contiguous in memory by\n aggregating the chunks into a single array.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> df = pl.read_csv(\"file.csv\", sep=\";\", n_rows=25) # doctest: +SKIP\n\n \"\"\"\n self = DataFrame.__new__(DataFrame)\n\n path: Optional[str]\n if isinstance(file, str):\n path = file\n else:\n path = None\n if isinstance(file, BytesIO):\n file = file.getvalue()\n if isinstance(file, StringIO):\n file = file.getvalue().encode()\n\n dtype_list: Optional[List[Tuple[str, Type[DataType]]]] = None\n dtype_slice: Optional[List[Type[DataType]]] = None\n if dtypes is not None:\n if isinstance(dtypes, dict):\n dtype_list = []\n for k, v in dtypes.items():\n dtype_list.append((k, py_type_to_dtype(v)))\n elif isinstance(dtypes, list):\n dtype_slice = dtypes\n else:\n raise ValueError(\"dtype arg should be list or dict\")\n\n processed_null_values = _process_null_values(null_values)\n\n if isinstance(file, str) and \"*\" in file:\n dtypes_dict = None\n if dtype_list is not None:\n dtypes_dict = {name: dt for (name, dt) in dtype_list}\n if dtype_slice is not None:\n raise ValueError(\n \"cannot use glob patterns and unamed dtypes as `dtypes` argument; Use dtypes: Mapping[str, Type[DataType]\"\n )\n from polars import scan_csv\n\n scan = scan_csv(\n file,\n has_header=has_header,\n sep=sep,\n comment_char=comment_char,\n quote_char=quote_char,\n skip_rows=skip_rows,\n dtypes=dtypes_dict,\n null_values=null_values,\n ignore_errors=ignore_errors,\n infer_schema_length=infer_schema_length,\n n_rows=n_rows,\n low_memory=low_memory,\n rechunk=rechunk,\n )\n if columns is None:\n return scan.collect(no_optimization=True)\n elif is_str_sequence(columns, False):\n return scan.select(columns).collect()\n else:\n raise ValueError(\n \"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]\"\n )\n\n projection, columns = handle_projection_columns(columns)\n\n self._df = PyDataFrame.read_csv(\n file,\n infer_schema_length,\n batch_size,\n has_header,\n ignore_errors,\n n_rows,\n skip_rows,\n projection,\n sep,\n rechunk,\n columns,\n encoding,\n n_threads,\n path,\n dtype_list,\n dtype_slice,\n low_memory,\n comment_char,\n quote_char,\n processed_null_values,\n parse_dates,\n )\n return self\n\n @staticmethod\n def read_parquet(\n file: Union[str, BinaryIO],\n columns: Optional[Union[List[int], List[str]]] = None,\n n_rows: Optional[int] = None,\n parallel: bool = True,\n ) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from a parquet file.\n\n Parameters\n ----------\n file\n Path to a file or a file like object. 
Any valid filepath can be used.\n columns\n Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.\n n_rows\n Stop reading from parquet file after reading ``n_rows``.\n parallel\n Read the parquet file in parallel. The single threaded reader consumes less memory.\n \"\"\"\n if isinstance(file, str) and \"*\" in file:\n from polars import scan_parquet\n\n scan = scan_parquet(file, n_rows=n_rows, rechunk=True, parallel=parallel)\n\n if columns is None:\n return scan.collect(no_optimization=True)\n elif is_str_sequence(columns, False):\n return scan.select(columns).collect()\n else:\n raise ValueError(\n \"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]\"\n )\n\n projection, columns = handle_projection_columns(columns)\n self = DataFrame.__new__(DataFrame)\n self._df = PyDataFrame.read_parquet(file, columns, projection, n_rows, parallel)\n return self\n\n @staticmethod\n def read_ipc(\n file: Union[str, BinaryIO],\n columns: Optional[Union[List[int], List[str]]] = None,\n n_rows: Optional[int] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from Arrow IPC stream format. This is also called the Feather (v2) format.\n\n Parameters\n ----------\n file\n Path to a file or a file like object.\n columns\n Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.\n n_rows\n Stop reading from IPC file after reading ``n_rows``.\n\n Returns\n -------\n DataFrame\n \"\"\"\n\n if isinstance(file, str) and \"*\" in file:\n from polars import scan_ipc\n\n scan = scan_ipc(file, n_rows=n_rows, rechunk=True)\n if columns is None:\n scan.collect()\n elif is_str_sequence(columns, False):\n scan.select(columns).collect()\n else:\n raise ValueError(\n \"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]\"\n )\n\n projection, columns = handle_projection_columns(columns)\n self = DataFrame.__new__(DataFrame)\n self._df = PyDataFrame.read_ipc(file, columns, projection, n_rows)\n return self\n\n @staticmethod\n def read_json(file: Union[str, BytesIO]) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from JSON format.\n\n Parameters\n ----------\n file\n Path to a file or a file like object.\n \"\"\"\n if not isinstance(file, str):\n file = file.read().decode(\"utf8\")\n self = DataFrame.__new__(DataFrame)\n self._df = PyDataFrame.read_json(file)\n return self\n\n def to_arrow(self) -> \"pa.Table\":\n \"\"\"\n Collect the underlying arrow arrays in an Arrow Table.\n This operation is mostly zero copy.\n\n Data types that do copy:\n - CategoricalType\n \"\"\"\n if not _PYARROW_AVAILABLE:\n raise ImportError( # pragma: no cover\n \"'pyarrow' is required for converting a polars DataFrame to an Arrow Table.\"\n )\n record_batches = self._df.to_arrow()\n return pa.Table.from_batches(record_batches)\n\n @overload\n def to_dict(self, as_series: Literal[True] = ...) 
-> Dict[str, \"pli.Series\"]:\n ...\n\n @overload\n def to_dict(self, as_series: Literal[False]) -> Dict[str, List[Any]]:\n ...\n\n @overload\n def to_dict(\n self, as_series: bool = True\n ) -> Union[Dict[str, \"pli.Series\"], Dict[str, List[Any]]]:\n ...\n\n def to_dict(\n self, as_series: bool = True\n ) -> Union[Dict[str, \"pli.Series\"], Dict[str, List[Any]]]:\n \"\"\"\n Convert DataFrame to a dictionary mapping column name to values.\n\n Parameters\n ----------\n as_series\n True -> Values are series\n False -> Values are List[Any]\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"A\": [1, 2, 3, 4, 5],\n ... \"fruits\": [\"banana\", \"banana\", \"apple\", \"apple\", \"banana\"],\n ... \"B\": [5, 4, 3, 2, 1],\n ... \"cars\": [\"beetle\", \"audi\", \"beetle\", \"beetle\", \"beetle\"],\n ... \"optional\": [28, 300, None, 2, -30],\n ... }\n ... )\n >>> df\n shape: (5, 5)\n ┌─────┬────────┬─────┬────────┬──────────┐\n │ A ┆ fruits ┆ B ┆ cars ┆ optional │\n │ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 ┆ str ┆ i64 │\n ╞═════╪════════╪═════╪════════╪══════════╡\n │ 1 ┆ banana ┆ 5 ┆ beetle ┆ 28 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ banana ┆ 4 ┆ audi ┆ 300 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ apple ┆ 3 ┆ beetle ┆ null │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 5 ┆ banana ┆ 1 ┆ beetle ┆ -30 │\n └─────┴────────┴─────┴────────┴──────────┘\n >>> df.to_dict(as_series=False)\n {'A': [1, 2, 3, 4, 5],\n 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'],\n 'B': [5, 4, 3, 2, 1],\n 'cars': ['beetle', 'audi', 'beetle', 'beetle', 'beetle'],\n 'optional': [28, 300, None, 2, -30]}\n >>> df.to_dict(as_series=True)\n {'A': shape: (5,)\n Series: 'A' [i64]\n [\n 1\n 2\n 3\n 4\n 5\n ], 'fruits': shape: (5,)\n Series: 'fruits' [str]\n [\n \"banana\"\n \"banana\"\n \"apple\"\n \"apple\"\n \"banana\"\n ], 'B': shape: (5,)\n Series: 'B' [i64]\n [\n 5\n 4\n 3\n 2\n 1\n ], 'cars': shape: (5,)\n Series: 'cars' [str]\n [\n \"beetle\"\n \"audi\"\n \"beetle\"\n \"beetle\"\n \"beetle\"\n ], 'optional': shape: (5,)\n Series: 'optional' [i64]\n [\n 28\n 300\n null\n 2\n -30\n ]}\n\n \"\"\"\n if as_series:\n return {s.name: s for s in self}\n else:\n return {s.name: s.to_list() for s in self}\n\n @overload\n def to_json(\n self,\n file: Optional[Union[BytesIO, str, Path]] = ...,\n pretty: bool = ...,\n row_oriented: bool = ...,\n json_lines: bool = ...,\n *,\n to_string: Literal[True],\n ) -> str:\n ...\n\n @overload\n def to_json(\n self,\n file: Optional[Union[BytesIO, str, Path]] = ...,\n pretty: bool = ...,\n row_oriented: bool = ...,\n json_lines: bool = ...,\n *,\n to_string: Literal[False] = ...,\n ) -> None:\n ...\n\n @overload\n def to_json(\n self,\n file: Optional[Union[BytesIO, str, Path]] = ...,\n pretty: bool = ...,\n row_oriented: bool = ...,\n json_lines: bool = ...,\n *,\n to_string: bool = ...,\n ) -> Optional[str]:\n ...\n\n def to_json(\n self,\n file: Optional[Union[BytesIO, str, Path]] = None,\n pretty: bool = False,\n row_oriented: bool = False,\n json_lines: bool = False,\n *,\n to_string: bool = False,\n ) -> Optional[str]:\n \"\"\"\n Serialize to JSON representation.\n\n Parameters\n ----------\n file\n Write to this file instead of returning an string.\n pretty\n Pretty serialize json.\n row_oriented\n Write to row oriented json. 
This is slower, but more common.\n json_lines\n Write to Json Lines format\n to_string\n Ignore file argument and return a string.\n \"\"\"\n if to_string or file is None:\n file = BytesIO()\n self._df.to_json(file, pretty, row_oriented, json_lines)\n file.seek(0)\n return file.read().decode(\"utf8\")\n else:\n self._df.to_json(file, pretty, row_oriented, json_lines)\n return None\n\n def to_pandas(\n self, *args: Any, date_as_object: bool = False, **kwargs: Any\n ) -> \"pd.DataFrame\": # noqa: F821\n \"\"\"\n Cast to a Pandas DataFrame. This requires that Pandas is installed.\n This operation clones data.\n\n Parameters\n ----------\n args\n Arguments will be sent to pyarrow.Table.to_pandas.\n date_as_object\n Cast dates to objects. If False, convert to datetime64[ns] dtype.\n kwargs\n Arguments will be sent to pyarrow.Table.to_pandas.\n\n Examples\n --------\n\n >>> import pandas\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> pandas_df = df.to_pandas()\n >>> type(pandas_df)\n <class 'pandas.core.frame.DataFrame'>\n\n \"\"\"\n return self.to_arrow().to_pandas(*args, date_as_object=date_as_object, **kwargs)\n\n def to_csv(\n self,\n file: Optional[Union[TextIO, BytesIO, str, Path]] = None,\n has_header: bool = True,\n sep: str = \",\",\n ) -> Optional[str]:\n \"\"\"\n Write Dataframe to comma-separated values file (csv).\n\n Parameters\n ----------\n file\n File path to which the file should be written.\n has_header\n Whether or not to include header in the CSV output.\n sep\n Separate CSV fields with this symbol.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3, 4, 5],\n ... \"bar\": [6, 7, 8, 9, 10],\n ... \"ham\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ... }\n ... )\n >>> df.to_csv(\"new_file.csv\", sep=\",\")\n\n \"\"\"\n if file is None:\n buffer = BytesIO()\n self._df.to_csv(buffer, has_header, ord(sep))\n return str(buffer.getvalue(), encoding=\"utf-8\")\n\n if isinstance(file, Path):\n file = str(file)\n\n self._df.to_csv(file, has_header, ord(sep))\n return None\n\n def to_ipc(\n self,\n file: Union[BinaryIO, BytesIO, str, Path],\n compression: Optional[Literal[\"uncompressed\", \"lz4\", \"zstd\"]] = \"uncompressed\",\n ) -> None:\n \"\"\"\n Write to Arrow IPC binary stream, or a feather file.\n\n Parameters\n ----------\n file\n File path to which the file should be written.\n compression\n Compression method. Choose one of:\n - \"uncompressed\"\n - \"lz4\"\n - \"zstd\"\n \"\"\"\n if compression is None:\n compression = \"uncompressed\"\n if isinstance(file, Path):\n file = str(file)\n\n self._df.to_ipc(file, compression)\n\n def to_dicts(self) -> List[Dict[str, Any]]:\n pydf = self._df\n names = self.columns\n\n return [\n {k: v for k, v in zip(names, pydf.row_tuple(i))}\n for i in range(0, self.height)\n ]\n\n def transpose(\n self,\n include_header: bool = False,\n header_name: str = \"column\",\n column_names: Optional[Union[Iterator[str], Sequence[str]]] = None,\n ) -> \"pli.DataFrame\":\n \"\"\"\n Transpose a DataFrame over the diagonal.\n\n Parameters\n ----------\n include_header:\n If set, the column names will be added as first column.\n header_name:\n If `include_header` is set, this determines the name of the column that will be inserted\n column_names:\n Optional generator/iterator that yields column names. Will be used to replace the columns in the DataFrame.\n\n Notes\n -----\n This is a very expensive operation. 
Perhaps you can do it differently.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"a\": [1, 2, 3], \"b\": [1, 2, 3]})\n >>> df.transpose(include_header=True)\n shape: (2, 4)\n ┌────────┬──────────┬──────────┬──────────┐\n │ column ┆ column_0 ┆ column_1 ┆ column_2 │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ str ┆ i64 ┆ i64 ┆ i64 │\n ╞════════╪══════════╪══════════╪══════════╡\n │ a ┆ 1 ┆ 2 ┆ 3 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ b ┆ 1 ┆ 2 ┆ 3 │\n └────────┴──────────┴──────────┴──────────┘\n\n # replace the auto generated column names with a list\n\n >>> df.transpose(include_header=False, column_names=[\"a\", \"b\", \"c\"])\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ i64 │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 2 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 1 ┆ 2 ┆ 3 │\n └─────┴─────┴─────┘\n\n Include the header as a separate column\n\n >>> df.transpose(\n ... include_header=True, header_name=\"foo\", column_names=[\"a\", \"b\", \"c\"]\n ... )\n shape: (2, 4)\n ┌─────┬─────┬─────┬─────┐\n │ foo ┆ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ str ┆ i64 ┆ i64 ┆ i64 │\n ╞═════╪═════╪═════╪═════╡\n │ a ┆ 1 ┆ 2 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 1 ┆ 2 ┆ 3 │\n └─────┴─────┴─────┴─────┘\n\n Replace the auto generated column with column names from a generator function\n\n >>> def name_generator():\n ... base_name = \"my_column_\"\n ... count = 0\n ... while True:\n ... yield f\"{base_name}{count}\"\n ... count += 1\n ...\n >>> df.transpose(include_header=False, column_names=name_generator())\n shape: (2, 3)\n ┌─────────────┬─────────────┬─────────────┐\n │ my_column_0 ┆ my_column_1 ┆ my_column_2 │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ i64 │\n ╞═════════════╪═════════════╪═════════════╡\n │ 1 ┆ 2 ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 1 ┆ 2 ┆ 3 │\n └─────────────┴─────────────┴─────────────┘\n\n \"\"\"\n df = wrap_df(self._df.transpose(include_header, header_name))\n if column_names is not None:\n names = []\n n = df.width\n if include_header:\n names.append(header_name)\n n -= 1\n\n column_names = iter(column_names)\n for _ in range(n):\n names.append(next(column_names))\n df.columns = names\n return df\n\n def to_parquet(\n self,\n file: Union[str, Path, BytesIO],\n compression: Optional[\n Union[\n Literal[\n \"uncompressed\", \"snappy\", \"gzip\", \"lzo\", \"brotli\", \"lz4\", \"zstd\"\n ],\n str,\n ]\n ] = \"snappy\",\n use_pyarrow: bool = False,\n **kwargs: Any,\n ) -> None:\n \"\"\"\n Write the DataFrame disk in parquet format.\n\n Parameters\n ----------\n file\n File path to which the file should be written.\n compression\n Compression method. 
Choose one of:\n - \"uncompressed\" (not supported by pyarrow)\n - \"snappy\"\n - \"gzip\"\n - \"lzo\"\n - \"brotli\"\n - \"lz4\"\n - \"zstd\"\n use_pyarrow\n Use C++ parquet implementation vs rust parquet implementation.\n At the moment C++ supports more features.\n\n **kwargs are passed to pyarrow.parquet.write_table\n \"\"\"\n if compression is None:\n compression = \"uncompressed\"\n if isinstance(file, Path):\n file = str(file)\n\n if use_pyarrow:\n if not _PYARROW_AVAILABLE:\n raise ImportError( # pragma: no cover\n \"'pyarrow' is required when using 'to_parquet(..., use_pyarrow=True)'.\"\n )\n\n tbl = self.to_arrow()\n\n data = {}\n\n for i, column in enumerate(tbl):\n # extract the name before casting\n if column._name is None:\n name = f\"column_{i}\"\n else:\n name = column._name\n\n data[name] = column\n tbl = pa.table(data)\n\n pa.parquet.write_table(\n table=tbl, where=file, compression=compression, **kwargs\n )\n else:\n self._df.to_parquet(file, compression)\n\n def to_numpy(self) -> np.ndarray:\n \"\"\"\n Convert DataFrame to a 2d numpy array.\n This operation clones data.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\"foo\": [1, 2, 3], \"bar\": [6, 7, 8], \"ham\": [\"a\", \"b\", \"c\"]}\n ... )\n >>> numpy_array = df.to_numpy()\n >>> type(numpy_array)\n <class 'numpy.ndarray'>\n\n \"\"\"\n return np.vstack([self.to_series(i).to_numpy() for i in range(self.width)]).T\n\n def __getstate__(self): # type: ignore\n return self.get_columns()\n\n def __setstate__(self, state): # type: ignore\n self._df = DataFrame(state)._df\n\n def __mul__(\n self, other: Union[\"DataFrame\", \"pli.Series\", int, float, bool]\n ) -> \"DataFrame\":\n if isinstance(other, DataFrame):\n return wrap_df(self._df.mul_df(other._df))\n\n other = _prepare_other_arg(other)\n return wrap_df(self._df.mul(other._s))\n\n def __truediv__(\n self, other: Union[\"DataFrame\", \"pli.Series\", int, float, bool]\n ) -> \"DataFrame\":\n if isinstance(other, DataFrame):\n return wrap_df(self._df.div_df(other._df))\n\n other = _prepare_other_arg(other)\n return wrap_df(self._df.div(other._s))\n\n def __add__(\n self, other: Union[\"DataFrame\", \"pli.Series\", int, float, bool, str]\n ) -> \"DataFrame\":\n if isinstance(other, DataFrame):\n return wrap_df(self._df.add_df(other._df))\n other = _prepare_other_arg(other)\n return wrap_df(self._df.add(other._s))\n\n def __sub__(\n self, other: Union[\"DataFrame\", \"pli.Series\", int, float, bool]\n ) -> \"DataFrame\":\n if isinstance(other, DataFrame):\n return wrap_df(self._df.sub_df(other._df))\n other = _prepare_other_arg(other)\n return wrap_df(self._df.sub(other._s))\n\n def __mod__(\n self, other: Union[\"DataFrame\", \"pli.Series\", int, float, bool]\n ) -> \"DataFrame\":\n if isinstance(other, DataFrame):\n return wrap_df(self._df.rem_df(other._df))\n other = _prepare_other_arg(other)\n return wrap_df(self._df.rem(other._s))\n\n def __str__(self) -> str:\n return self._df.as_str()\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def __getattr__(self, item: Any) -> \"PySeries\":\n \"\"\"\n Access columns as attribute.\n \"\"\"\n try:\n return pli.wrap_s(self._df.column(item))\n except RuntimeError:\n raise AttributeError(f\"{item} not found\")\n\n def __iter__(self) -> Iterator[Any]:\n return self.get_columns().__iter__()\n\n def find_idx_by_name(self, name: str) -> int:\n \"\"\"\n Find the index of a column by name.\n\n Parameters\n ----------\n name\n Name of the column to find.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... 
{\"foo\": [1, 2, 3], \"bar\": [6, 7, 8], \"ham\": [\"a\", \"b\", \"c\"]}\n ... )\n >>> df.find_idx_by_name(\"ham\")\n 2\n\n \"\"\"\n return self._df.find_idx_by_name(name)\n\n def _pos_idx(self, idx: int, dim: int) -> int:\n if idx >= 0:\n return idx\n else:\n return self.shape[dim] + idx\n\n # __getitem__() mostly returns a dataframe. The major exception is when a string is passed in. Note that there are\n # more subtle cases possible where a non-string value leads to a Series.\n @overload\n def __getitem__(self, item: str) -> \"pli.Series\":\n ...\n\n @overload\n def __getitem__(\n self,\n item: Union[\n int, range, slice, np.ndarray, \"pli.Expr\", \"pli.Series\", List, tuple\n ],\n ) -> \"DataFrame\":\n ...\n\n def __getitem__(\n self,\n item: Union[\n str, int, range, slice, np.ndarray, \"pli.Expr\", \"pli.Series\", List, tuple\n ],\n ) -> Union[\"DataFrame\", \"pli.Series\"]:\n \"\"\"\n Does quite a lot. Read the comments.\n \"\"\"\n if isinstance(item, pli.Expr):\n return self.select(item)\n # select rows and columns at once\n # every 2d selection, i.e. tuple is row column order, just like numpy\n if isinstance(item, tuple) and len(item) == 2:\n row_selection, col_selection = item\n\n # df[:, unknown]\n if isinstance(row_selection, slice):\n\n # multiple slices\n # df[:, :]\n if isinstance(col_selection, slice):\n # slice can be\n # by index\n # [1:8]\n # or by column name\n # [\"foo\":\"bar\"]\n # first we make sure that the slice is by index\n start = col_selection.start\n stop = col_selection.stop\n if isinstance(col_selection.start, str):\n start = self.find_idx_by_name(col_selection.start)\n if isinstance(col_selection.stop, str):\n stop = self.find_idx_by_name(col_selection.stop) + 1\n\n col_selection = slice(start, stop, col_selection.step)\n\n df = self.__getitem__(self.columns[col_selection])\n return df[row_selection]\n\n # slice and boolean mask\n # df[:2, [True, False, True]]\n if isinstance(col_selection, (Sequence, pli.Series)):\n if (\n isinstance(col_selection[0], bool)\n or isinstance(col_selection, pli.Series)\n and col_selection.dtype() == Boolean\n ):\n df = self.__getitem__(row_selection)\n select = []\n for col, valid in zip(df.columns, col_selection):\n if valid:\n select.append(col)\n return df.select(select)\n\n # single slice\n # df[:, unknown]\n series = self.__getitem__(col_selection)\n # s[:]\n pli.wrap_s(series[row_selection])\n\n # df[2, :] (select row as df)\n if isinstance(row_selection, int):\n if isinstance(col_selection, (slice, list, np.ndarray)):\n df = self[:, col_selection]\n return df.slice(row_selection, 1)\n # df[2, \"a\"]\n if isinstance(col_selection, str):\n return self[col_selection][row_selection]\n\n # column selection can be \"a\" and [\"a\", \"b\"]\n if isinstance(col_selection, str):\n col_selection = [col_selection]\n\n # df[:, 1]\n if isinstance(col_selection, int):\n series = self.to_series(col_selection)\n return series[row_selection]\n\n if isinstance(col_selection, list):\n # df[:, [1, 2]]\n # select by column indexes\n if isinstance(col_selection[0], int):\n series_list = [self.to_series(i) for i in col_selection]\n df = DataFrame(series_list)\n return df[row_selection]\n df = self.__getitem__(col_selection)\n return df.__getitem__(row_selection)\n\n # select single column\n # df[\"foo\"]\n if isinstance(item, str):\n return pli.wrap_s(self._df.column(item))\n\n # df[idx]\n if isinstance(item, int):\n return self.slice(self._pos_idx(item, dim=0), 1)\n\n # df[range(n)]\n if isinstance(item, range):\n return 
self[range_to_slice(item)]\n\n # df[:]\n if isinstance(item, slice):\n # special case df[::-1]\n if item.start is None and item.stop is None and item.step == -1:\n return self.select(pli.col(\"*\").reverse())\n\n if getattr(item, \"end\", False):\n raise ValueError(\"A slice with steps larger than 1 is not supported.\")\n if item.start is None:\n start = 0\n else:\n start = item.start\n if item.stop is None:\n stop = self.height\n else:\n stop = item.stop\n\n length = stop - start\n if item.step is None:\n # df[start:stop]\n return self.slice(start, length)\n else:\n # df[start:stop:step]\n return self.select(\n pli.col(\"*\").slice(start, length).take_every(item.step)\n )\n\n # select rows by numpy mask or index\n # df[[1, 2, 3]]\n # df[[true, false, true]]\n if isinstance(item, np.ndarray):\n if item.dtype == int:\n return wrap_df(self._df.take(item))\n if isinstance(item[0], str):\n return wrap_df(self._df.select(item))\n if item.dtype == bool:\n return wrap_df(self._df.filter(pli.Series(\"\", item).inner()))\n\n if isinstance(item, Sequence):\n if isinstance(item[0], str):\n # select multiple columns\n # df[[\"foo\", \"bar\"]]\n return wrap_df(self._df.select(item))\n elif isinstance(item[0], pli.Expr):\n return self.select(item)\n elif type(item[0]) == bool:\n item = pli.Series(\"\", item) # fall through to next if isinstance\n elif is_int_sequence(item):\n return wrap_df(self._df.take([self._pos_idx(i, dim=0) for i in item]))\n\n if isinstance(item, pli.Series):\n dtype = item.dtype\n if dtype == Boolean:\n return wrap_df(self._df.filter(item.inner()))\n if dtype == UInt32:\n return wrap_df(self._df.take_with_series(item.inner()))\n\n # if no data has been returned, the operation is not supported\n raise NotImplementedError\n\n def __setitem__(\n self, key: Union[str, List, Tuple[Any, Union[str, int]]], value: Any\n ) -> None:\n # df[\"foo\"] = series\n if isinstance(key, str):\n try:\n self.replace(key, pli.Series(key, value))\n except Exception:\n self.hstack([pli.Series(key, value)], in_place=True)\n # df[[\"C\", \"D\"]]\n elif isinstance(key, list):\n value = np.array(value)\n if len(value.shape) != 2:\n raise ValueError(\"can only set multiple columns with 2D matrix\")\n if value.shape[1] != len(key):\n raise ValueError(\n \"matrix columns should be equal to list use to determine column names\"\n )\n for (i, name) in enumerate(key):\n self[name] = value[:, i]\n\n # df[a, b]\n elif isinstance(key, tuple):\n row_selection, col_selection = key\n\n # get series column selection\n if isinstance(col_selection, str):\n s = self.__getitem__(col_selection)\n elif isinstance(col_selection, int):\n s = self[:, col_selection] # type: ignore\n else:\n raise ValueError(f\"column selection not understood: {col_selection}\")\n\n # dispatch to __setitem__ of Series to do modification\n s[row_selection] = value\n\n # now find the location to place series\n # df[idx]\n if isinstance(col_selection, int):\n self.replace_at_idx(col_selection, s)\n # df[\"foo\"]\n elif isinstance(col_selection, str):\n self.replace(col_selection, s)\n else:\n raise NotImplementedError\n\n def __len__(self) -> int:\n return self.height\n\n def _repr_html_(self) -> str:\n \"\"\"\n Used by jupyter notebooks to get a html table.\n\n Output rows and columns can be modified by setting the following ENVIRONMENT variables:\n\n * POLARS_FMT_MAX_COLS: set the number of columns\n * POLARS_FMT_MAX_ROWS: set the number of rows\n \"\"\"\n max_cols = int(os.environ.get(\"POLARS_FMT_MAX_COLS\", default=75))\n max_rows = 
int(os.environ.get(\"POLARS_FMT_MAX_ROWS\", default=25))\n return \"\\n\".join(NotebookFormatter(self, max_cols, max_rows).render())\n\n def to_series(self, index: int = 0) -> \"pli.Series\":\n \"\"\"\n Select column as Series at index location.\n\n Parameters\n ----------\n index\n Location of selection.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.to_series(1)\n shape: (3,)\n Series: 'bar' [i64]\n [\n 6\n 7\n 8\n ]\n\n \"\"\"\n return pli.wrap_s(self._df.select_at_idx(index))\n\n def rename(self, mapping: Dict[str, str]) -> \"DataFrame\":\n \"\"\"\n Rename column names.\n\n Parameters\n ----------\n mapping\n Key value pairs that map from old name to new name.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\"foo\": [1, 2, 3], \"bar\": [6, 7, 8], \"ham\": [\"a\", \"b\", \"c\"]}\n ... )\n >>> df.rename({\"foo\": \"apple\"})\n shape: (3, 3)\n ┌───────┬─────┬─────┐\n │ apple ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═══════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └───────┴─────┴─────┘\n\n \"\"\"\n df = self.clone()\n for k, v in mapping.items():\n df._df.rename(k, v)\n return df\n\n def insert_at_idx(self, index: int, series: \"pli.Series\") -> None:\n \"\"\"\n Insert a Series at a certain column index. This operation is in place.\n\n Parameters\n ----------\n index\n Column to insert the new `Series` column.\n series\n `Series` to insert.\n \"\"\"\n self._df.insert_at_idx(index, series._s)\n\n def filter(self, predicate: \"pli.Expr\") -> \"DataFrame\":\n \"\"\"\n Filter the rows in the DataFrame based on a predicate expression.\n\n Parameters\n ----------\n predicate\n Expression that evaluates to a boolean Series.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n\n Filter on one condition:\n\n >>> df.filter(pl.col(\"foo\") < 3)\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n └─────┴─────┴─────┘\n\n Filter on multiple conditions:\n\n >>> df.filter((pl.col(\"foo\") < 3) & (pl.col(\"ham\") == \"a\"))\n shape: (1, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n └─────┴─────┴─────┘\n\n \"\"\"\n return (\n self.lazy()\n .filter(predicate)\n .collect(no_optimization=True, string_cache=False)\n )\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Get the shape of the DataFrame.\n\n Examples\n --------\n >>> df = pl.DataFrame({\"foo\": [1, 2, 3, 4, 5]})\n >>> df.shape\n (5, 1)\n\n \"\"\"\n return self._df.shape()\n\n @property\n def height(self) -> int:\n \"\"\"\n Get the height of the DataFrame.\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"foo\": [1, 2, 3, 4, 5]})\n >>> df.height\n 5\n\n \"\"\"\n return self._df.height()\n\n @property\n def width(self) -> int:\n \"\"\"\n Get the width of the DataFrame.\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"foo\": [1, 2, 3, 4, 5]})\n >>> df.width\n 1\n\n \"\"\"\n return self._df.width()\n\n @property\n def columns(self) -> List[str]:\n \"\"\"\n Get or set column names.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... 
\"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.columns\n ['foo', 'bar', 'ham']\n\n Set column names:\n\n >>> df.columns = [\"apple\", \"banana\", \"orange\"]\n >>> df\n shape: (3, 3)\n ┌───────┬────────┬────────┐\n │ apple ┆ banana ┆ orange │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═══════╪════════╪════════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └───────┴────────┴────────┘\n\n \"\"\"\n return self._df.columns()\n\n @columns.setter\n def columns(self, columns: Sequence[str]) -> None:\n \"\"\"\n Change the column names of the `DataFrame`.\n\n Parameters\n ----------\n columns\n A list with new names for the `DataFrame`.\n The length of the list should be equal to the width of the `DataFrame`.\n \"\"\"\n self._df.set_column_names(columns)\n\n @property\n def dtypes(self) -> List[Type[DataType]]:\n \"\"\"\n Get dtypes of columns in DataFrame. Dtypes can also be found in column headers when printing the DataFrame.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.dtypes\n [<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Float64'>, <class 'polars.datatypes.Utf8'>]\n >>> df\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └─────┴─────┴─────┘\n\n See Also\n --------\n schema : Return a dict of [column name, dtype]\n \"\"\"\n return self._df.dtypes()\n\n @property\n def schema(self) -> Dict[str, Type[DataType]]:\n \"\"\"\n Get a dict[column name, DataType]\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.schema\n {'foo': <class 'polars.datatypes.Int64'>, 'bar': <class 'polars.datatypes.Float64'>, 'ham': <class 'polars.datatypes.Utf8'>}\n\n \"\"\"\n return {c: self[c].dtype for c in self.columns}\n\n def describe(self) -> \"DataFrame\":\n \"\"\"\n Summary statistics for a DataFrame. Only summarizes numeric datatypes at the moment and returns nulls for non numeric datatypes.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [1.0, 2.8, 3.0],\n ... \"b\": [4, 5, 6],\n ... \"c\": [True, False, True],\n ... }\n ... 
)\n >>> df.describe()\n shape: (5, 4)\n ┌──────────┬────────────────────┬─────┬──────┐\n │ describe ┆ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ str ┆ f64 ┆ f64 ┆ f64 │\n ╞══════════╪════════════════════╪═════╪══════╡\n │ mean ┆ 2.2666666666666666 ┆ 5 ┆ null │\n ├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ std ┆ 1.1015141094572205 ┆ 1 ┆ null │\n ├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ min ┆ 1 ┆ 4 ┆ 0.0 │\n ├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ max ┆ 3 ┆ 6 ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ median ┆ 2.8 ┆ 5 ┆ null │\n └──────────┴────────────────────┴─────┴──────┘\n\n \"\"\"\n\n def describe_cast(self: \"DataFrame\") -> \"DataFrame\":\n columns = []\n for s in self:\n if s.is_numeric() or s.is_boolean():\n columns.append(s.cast(float))\n else:\n columns.append(s)\n return DataFrame(columns)\n\n summary = pli.concat(\n [\n describe_cast(self.mean()),\n describe_cast(self.std()),\n describe_cast(self.min()),\n describe_cast(self.max()),\n describe_cast(self.median()),\n ]\n )\n summary.insert_at_idx(\n 0, pli.Series(\"describe\", [\"mean\", \"std\", \"min\", \"max\", \"median\"])\n )\n return summary\n\n def replace_at_idx(self, index: int, series: \"pli.Series\") -> None:\n \"\"\"\n Replace a column at an index location.\n\n Parameters\n ----------\n index\n Column index.\n series\n Series that will replace the column.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> x = pl.Series(\"apple\", [10, 20, 30])\n >>> df.replace_at_idx(0, x)\n >>> df\n shape: (3, 3)\n ┌───────┬─────┬─────┐\n │ apple ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═══════╪═════╪═════╡\n │ 10 ┆ 6 ┆ a │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 20 ┆ 7 ┆ b │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 30 ┆ 8 ┆ c │\n └───────┴─────┴─────┘\n\n \"\"\"\n self._df.replace_at_idx(index, series._s)\n\n @overload\n def sort(\n self,\n by: Union[str, \"pli.Expr\", List[str], List[\"pli.Expr\"]],\n reverse: Union[bool, List[bool]] = ...,\n *,\n in_place: Literal[False] = ...,\n ) -> \"DataFrame\":\n ...\n\n @overload\n def sort(\n self,\n by: Union[str, \"pli.Expr\", List[str], List[\"pli.Expr\"]],\n reverse: Union[bool, List[bool]] = ...,\n *,\n in_place: Literal[True],\n ) -> None:\n ...\n\n @overload\n def sort(\n self,\n by: Union[str, \"pli.Expr\", List[str], List[\"pli.Expr\"]],\n reverse: Union[bool, List[bool]] = ...,\n *,\n in_place: bool,\n ) -> Optional[\"DataFrame\"]:\n ...\n\n def sort(\n self,\n by: Union[str, \"pli.Expr\", List[str], List[\"pli.Expr\"]],\n reverse: Union[bool, List[bool]] = False,\n *,\n in_place: bool = False,\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Sort the DataFrame by column.\n\n Parameters\n ----------\n by\n By which column to sort. Only accepts string.\n reverse\n Reverse/descending sort.\n in_place\n Perform operation in-place.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.sort(\"foo\", reverse=True)\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 3 ┆ 8 ┆ c │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 1 ┆ 6 ┆ a │\n └─────┴─────┴─────┘\n\n **Sort by multiple columns.**\n For multiple columns we can also use expression syntax.\n\n >>> df.sort(\n ... [pl.col(\"foo\"), pl.col(\"bar\") ** 2],\n ... 
reverse=[True, False],\n ... )\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 3 ┆ 64 ┆ c │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 49 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 1 ┆ 36 ┆ a │\n └─────┴─────┴─────┘\n\n \"\"\"\n if type(by) is list or isinstance(by, pli.Expr):\n df = (\n self.lazy()\n .sort(by, reverse)\n .collect(no_optimization=True, string_cache=False)\n )\n if in_place:\n self._df = df._df\n return self\n return df\n if in_place:\n self._df.sort_in_place(by, reverse)\n return None\n else:\n return wrap_df(self._df.sort(by, reverse))\n\n def frame_equal(self, other: \"DataFrame\", null_equal: bool = True) -> bool:\n \"\"\"\n Check if DataFrame is equal to other.\n\n Parameters\n ----------\n other\n DataFrame to compare with.\n null_equal\n Consider null values as equal.\n\n Examples\n --------\n >>> df1 = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df2 = pl.DataFrame(\n ... {\n ... \"foo\": [3, 2, 1],\n ... \"bar\": [8.0, 7.0, 6.0],\n ... \"ham\": [\"c\", \"b\", \"a\"],\n ... }\n ... )\n >>> df1.frame_equal(df1)\n True\n >>> df1.frame_equal(df2)\n False\n\n \"\"\"\n return self._df.frame_equal(other._df, null_equal)\n\n def replace(self, column: str, new_col: \"pli.Series\") -> None:\n \"\"\"\n Replace a column by a new Series.\n\n Parameters\n ----------\n column\n Column to replace.\n new_col\n New column to insert.\n \"\"\"\n self._df.replace(column, new_col.inner())\n\n def slice(self, offset: int, length: int) -> \"DataFrame\":\n \"\"\"\n Slice this DataFrame over the rows direction.\n\n Parameters\n ----------\n offset\n Offset index.\n length\n Length of the slice.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.slice(1, 2)\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └─────┴─────┴─────┘\n\n \"\"\"\n if length < 0:\n length = self.height - offset + length\n return wrap_df(self._df.slice(offset, length))\n\n def limit(self, length: int = 5) -> \"DataFrame\":\n \"\"\"\n Get first N rows as DataFrame.\n\n See Also `DataFrame.head`\n\n Parameters\n ----------\n length\n Amount of rows to take.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.limit(2)\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n └─────┴─────┴─────┘\n\n \"\"\"\n return self.head(length)\n\n def head(self, length: int = 5) -> \"DataFrame\":\n \"\"\"\n Get first N rows as DataFrame.\n\n Parameters\n ----------\n length\n Length of the head.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3, 4, 5],\n ... \"bar\": [6, 7, 8, 9, 10],\n ... \"ham\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ... }\n ... 
)\n >>> df.head(3)\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └─────┴─────┴─────┘\n\n \"\"\"\n return wrap_df(self._df.head(length))\n\n def tail(self, length: int = 5) -> \"DataFrame\":\n \"\"\"\n Get last N rows as DataFrame.\n\n Parameters\n ----------\n length\n Length of the tail.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3, 4, 5],\n ... \"bar\": [6, 7, 8, 9, 10],\n ... \"ham\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ... }\n ... )\n >>> df.tail(3)\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 3 ┆ 8 ┆ c │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 4 ┆ 9 ┆ d │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 5 ┆ 10 ┆ e │\n └─────┴─────┴─────┘\n\n \"\"\"\n return wrap_df(self._df.tail(length))\n\n def drop_nulls(self, subset: Optional[Union[str, List[str]]] = None) -> \"DataFrame\":\n \"\"\"\n Return a new DataFrame where the null values are dropped.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, None, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.drop_nulls()\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n └─────┴─────┴─────┘\n\n This method only drops nulls row-wise if any single value of the row is null.\n\n Below are some example snippets that show how you could drop null values based on other\n conditions\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [None, None, None, None],\n ... \"b\": [1, 2, None, 1],\n ... \"c\": [1, None, None, 1],\n ... }\n ... )\n >>> df\n shape: (4, 3)\n ┌──────┬──────┬──────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ i64 ┆ i64 │\n ╞══════╪══════╪══════╡\n │ null ┆ 1 ┆ 1 │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ 2 ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ null ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ 1 ┆ 1 │\n └──────┴──────┴──────┘\n\n Drop a row only if all values are null:\n\n >>> df.filter(\n ... ~pl.fold(\n ... acc=True,\n ... f=lambda acc, s: acc & s.is_null(),\n ... exprs=pl.all(),\n ... )\n ... 
)\n shape: (3, 3)\n ┌──────┬─────┬──────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ i64 ┆ i64 │\n ╞══════╪═════╪══════╡\n │ null ┆ 1 ┆ 1 │\n ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ 2 ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ 1 ┆ 1 │\n └──────┴─────┴──────┘\n\n Drop a column if all values are null:\n\n >>> df[:, [not (s.null_count() == df.height) for s in df]]\n shape: (4, 2)\n ┌──────┬──────┐\n │ b ┆ c │\n │ --- ┆ --- │\n │ i64 ┆ i64 │\n ╞══════╪══════╡\n │ 1 ┆ 1 │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 1 ┆ 1 │\n └──────┴──────┘\n\n \"\"\"\n if isinstance(subset, str):\n subset = [subset]\n return wrap_df(self._df.drop_nulls(subset))\n\n def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n \"\"\"\n Apply a function on Self.\n\n Parameters\n ----------\n func\n Callable.\n args\n Arguments.\n kwargs\n Keyword arguments.\n \"\"\"\n return func(self, *args, **kwargs)\n\n def with_row_count(self, name: str = \"row_nr\") -> \"DataFrame\":\n \"\"\"\n Add a column at index 0 that counts the rows.\n\n Parameters\n ----------\n name\n Name of the column to add.\n \"\"\"\n return wrap_df(self._df.with_row_count(name))\n\n def groupby(\n self,\n by: Union[str, \"pli.Expr\", Sequence[str], Sequence[\"pli.Expr\"]],\n maintain_order: bool = False,\n ) -> \"GroupBy\":\n \"\"\"\n Start a groupby operation.\n\n Parameters\n ----------\n by\n Column(s) to group by.\n maintain_order\n Make sure that the order of the groups remain consistent. This is more expensive than a default groupby.\n Note that this only works in expression aggregations.\n\n Examples\n --------\n Below we group by column `\"a\"`, and we sum column `\"b\"`.\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [\"a\", \"b\", \"a\", \"b\", \"b\", \"c\"],\n ... \"b\": [1, 2, 3, 4, 5, 6],\n ... \"c\": [6, 5, 4, 3, 2, 1],\n ... }\n ... )\n >>> df.groupby(\"a\")[\"b\"].sum().sort(by=\"a\")\n shape: (3, 2)\n ┌─────┬───────┐\n │ a ┆ b_sum │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═══════╡\n │ a ┆ 4 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ b ┆ 11 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ c ┆ 6 │\n └─────┴───────┘\n\n We can also loop over the grouped `DataFrame`\n\n >>> for sub_df in df.groupby(\"a\"):\n ... print(sub_df) # doctest: +IGNORE_RESULT\n ...\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ str ┆ i64 ┆ i64 │\n ╞═════╪═════╪═════╡\n │ b ┆ 2 ┆ 5 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 4 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 5 ┆ 2 │\n └─────┴─────┴─────┘\n shape: (1, 3)\n ┌─────┬─────┬─────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ str ┆ i64 ┆ i64 │\n ╞═════╪═════╪═════╡\n │ c ┆ 6 ┆ 1 │\n └─────┴─────┴─────┘\n\n \"\"\"\n if isinstance(by, str):\n by = [by]\n return GroupBy(self._df, by, maintain_order=maintain_order) # type: ignore\n\n def groupby_dynamic(\n self,\n time_column: str,\n every: str,\n period: Optional[str] = None,\n offset: Optional[str] = None,\n truncate: bool = True,\n include_boundaries: bool = False,\n closed: str = \"right\",\n by: Optional[Union[str, List[str], \"pli.Expr\", List[\"pli.Expr\"]]] = None,\n ) -> \"DynamicGroupBy\":\n \"\"\"\n Groups based on a time value. Time windows are calculated and rows are assigned to windows.\n Different from a normal groupby is that a row can be member of multiple groups. 
The time window could\n be seen as a rolling window, with a window size determined by dates/times instead of slots in the DataFrame.\n\n A window is defined by:\n\n - every: interval of the window\n - period: length of the window\n - offset: offset of the window\n\n The `every`, `period` and `offset` arguments are created with\n the following string language:\n\n - 1ns (1 nanosecond)\n - 1us (1 microsecond)\n - 1ms (1 millisecond)\n - 1s (1 second)\n - 1m (1 minute)\n - 1h (1 hour)\n - 1d (1 day)\n - 1w (1 week)\n - 1mo (1 calendar month)\n - 1y (1 calendar year)\n\n Or combine them:\n \"3d12h4m25s\" # 3 days, 12 hours, 4 minutes, and 25 seconds\n\n .. warning::\n This API is experimental and may change without it being considered a breaking change.\n\n Parameters\n ----------\n time_column\n Column used to group based on the time window.\n Often to type Date/Datetime\n This column must be sorted in ascending order. If not the output will not make sense.\n every\n interval of the window\n period\n length of the window, if None it is equal to 'every'\n offset\n offset of the window\n truncate\n truncate the time value to the window lower bound\n include_boundaries\n add the lower and upper bound of the window to the \"_lower_bound\" and \"_upper_bound\" columns\n closed\n Defines if the window interval is closed or not.\n Any of {\"left\", \"right\", \"both\" \"none\"}\n by\n Also group by this column/these columns\n\n Examples\n --------\n\n >>> from datetime import datetime\n >>> # create an example dataframe\n >>> df = pl.DataFrame(\n ... {\n ... \"time\": pl.date_range(\n ... low=datetime(2021, 12, 16),\n ... high=datetime(2021, 12, 16, 3),\n ... interval=\"30m\",\n ... ),\n ... \"n\": range(7),\n ... }\n ... )\n >>> df\n shape: (7, 2)\n ┌─────────────────────┬─────┐\n │ time ┆ n │\n │ --- ┆ --- │\n │ datetime[ns] ┆ i64 │\n ╞═════════════════════╪═════╡\n │ 2021-12-16 00:00:00 ┆ 0 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 00:30:00 ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 01:30:00 ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ 4 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 02:30:00 ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2021-12-16 03:00:00 ┆ 6 │\n └─────────────────────┴─────┘\n\n Group by windows of 1 hour starting at 2021-12-16 00:00:00.\n\n >>> (\n ... df.groupby_dynamic(\"time\", every=\"1h\").agg(\n ... [pl.col(\"time\").min(), pl.col(\"time\").max()]\n ... )\n ... )\n shape: (3, 3)\n ┌─────────────────────┬─────────────────────┬─────────────────────┐\n │ time ┆ time_min ┆ time_max │\n │ --- ┆ --- ┆ --- │\n │ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] │\n ╞═════════════════════╪═════════════════════╪═════════════════════╡\n │ 2021-12-16 00:00:00 ┆ 2021-12-16 00:30:00 ┆ 2021-12-16 01:00:00 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ 2021-12-16 01:30:00 ┆ 2021-12-16 02:00:00 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ 2021-12-16 02:30:00 ┆ 2021-12-16 03:00:00 │\n └─────────────────────┴─────────────────────┴─────────────────────┘\n\n The window boundaries can also be added to the aggregation result\n\n >>> (\n ... df.groupby_dynamic(\"time\", every=\"1h\", include_boundaries=True).agg(\n ... [pl.col(\"time\").count()]\n ... )\n ... 
)\n shape: (3, 4)\n ┌─────────────────────┬─────────────────────┬─────────────────────┬────────────┐\n │ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │\n ╞═════════════════════╪═════════════════════╪═════════════════════╪════════════╡\n │ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │\n └─────────────────────┴─────────────────────┴─────────────────────┴────────────┘\n\n When closed=\"left\", should not include right end of interval [lower_bound, upper_bound)\n\n >>> (\n ... df.groupby_dynamic(\"time\", every=\"1h\", closed=\"left\").agg(\n ... [pl.col(\"time\").count(), pl.col(\"time\").list()]\n ... )\n ... )\n shape: (4, 3)\n ┌─────────────────────┬────────────┬─────────────────────────────────────┐\n │ time ┆ time_count ┆ time_agg_list │\n │ --- ┆ --- ┆ --- │\n │ datetime[ns] ┆ u32 ┆ list [datetime[ns]] │\n ╞═════════════════════╪════════════╪═════════════════════════════════════╡\n │ 2021-12-16 00:00:00 ┆ 2 ┆ [2021-12-16 00:00:00, 2021-12-16... │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ 2 ┆ [2021-12-16 01:00:00, 2021-12-16... │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ 2 ┆ [2021-12-16 02:00:00, 2021-12-16... │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 03:00:00 ┆ 1 ┆ [2021-12-16 03:00:00] │\n └─────────────────────┴────────────┴─────────────────────────────────────┘\n\n When closed=\"both\" the time values at the window boundaries belong to 2 groups.\n\n >>> (\n ... df.groupby_dynamic(\"time\", every=\"1h\", closed=\"both\").agg(\n ... [pl.col(\"time\").count()]\n ... )\n ... )\n shape: (4, 2)\n ┌─────────────────────┬────────────┐\n │ time ┆ time_count │\n │ --- ┆ --- │\n │ datetime[ns] ┆ u32 │\n ╞═════════════════════╪════════════╡\n │ 2021-12-16 00:00:00 ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 03:00:00 ┆ 1 │\n └─────────────────────┴────────────┘\n\n Dynamic groupbys can also be combined with grouping on normal keys\n\n >>> df = pl.DataFrame(\n ... {\n ... \"time\": pl.date_range(\n ... low=datetime(2021, 12, 16),\n ... high=datetime(2021, 12, 16, 3),\n ... interval=\"30m\",\n ... ),\n ... \"groups\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"a\", \"a\"],\n ... }\n ... )\n >>> df\n shape: (7, 2)\n ┌─────────────────────┬────────┐\n │ time ┆ groups │\n │ --- ┆ --- │\n │ datetime[ns] ┆ str │\n ╞═════════════════════╪════════╡\n │ 2021-12-16 00:00:00 ┆ a │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 00:30:00 ┆ a │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:00:00 ┆ a │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 01:30:00 ┆ b │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:00:00 ┆ b │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 02:30:00 ┆ a │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2021-12-16 03:00:00 ┆ a │\n └─────────────────────┴────────┘\n >>> (\n ... df.groupby_dynamic(\n ... \"time\",\n ... 
every=\"1h\",\n ... closed=\"both\",\n ... by=\"groups\",\n ... include_boundaries=True,\n ... ).agg([pl.col(\"time\").count()])\n ... )\n shape: (6, 5)\n ┌────────┬─────────────────────┬─────────────────────┬─────────────────────┬────────────┐\n │ groups ┆ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │\n │ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n │ str ┆ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │\n ╞════════╪═════════════════════╪═════════════════════╪═════════════════════╪════════════╡\n │ a ┆ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 3 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ a ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 1 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ a ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ a ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 04:00:00 ┆ 2021-12-16 03:00:00 ┆ 1 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ b ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │\n ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ b ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 1 │\n └────────┴─────────────────────┴─────────────────────┴─────────────────────┴────────────┘\n\n \"\"\"\n\n return DynamicGroupBy(\n self,\n time_column,\n every,\n period,\n offset,\n truncate,\n include_boundaries,\n closed,\n by,\n )\n\n def upsample(self, by: str, interval: Union[str, timedelta]) -> \"DataFrame\":\n \"\"\"\n Upsample a DataFrame at a regular frequency.\n\n .. warning::\n This API is experimental and may change without it being considered a breaking change.\n\n Parameters\n ----------\n by\n Column that will be used as key in the upsampling operation.\n This should be a datetime column.\n interval\n Interval periods.\n \"\"\"\n if self[by].dtype != Datetime:\n raise ValueError(\n f\"Column {by} should be of type datetime. 
Got {self[by].dtype}\"\n )\n bounds = self.select(\n [pli.col(by).min().alias(\"low\"), pli.col(by).max().alias(\"high\")]\n )\n low: datetime = bounds[\"low\"].dt[0] # type: ignore\n high: datetime = bounds[\"high\"].dt[0] # type: ignore\n upsampled = pli.date_range(low, high, interval, name=by)\n return DataFrame(upsampled).join(self, on=by, how=\"left\")\n\n def join(\n self,\n df: \"DataFrame\",\n left_on: Optional[Union[str, \"pli.Expr\", List[Union[str, \"pli.Expr\"]]]] = None,\n right_on: Optional[Union[str, \"pli.Expr\", List[Union[str, \"pli.Expr\"]]]] = None,\n on: Optional[Union[str, \"pli.Expr\", List[Union[str, \"pli.Expr\"]]]] = None,\n how: str = \"inner\",\n suffix: str = \"_right\",\n asof_by: Optional[Union[str, List[str]]] = None,\n asof_by_left: Optional[Union[str, List[str]]] = None,\n asof_by_right: Optional[Union[str, List[str]]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n SQL like joins.\n\n Parameters\n ----------\n df\n DataFrame to join with.\n left_on\n Name(s) of the left join column(s).\n right_on\n Name(s) of the right join column(s).\n on\n Name(s) of the join columns in both DataFrames.\n how\n Join strategy\n - \"inner\"\n - \"left\"\n - \"outer\"\n - \"asof\"\n - \"cross\"\n suffix\n Suffix to append to columns with a duplicate name.\n asof_by\n join on these columns before doing asof join\n asof_by_left\n join on these columns before doing asof join\n asof_by_right\n join on these columns before doing asof join\n\n Returns\n -------\n Joined DataFrame\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> other_df = pl.DataFrame(\n ... {\n ... \"apple\": [\"x\", \"y\", \"z\"],\n ... \"ham\": [\"a\", \"b\", \"d\"],\n ... }\n ... 
)\n >>> df.join(other_df, on=\"ham\")\n shape: (2, 4)\n ┌─────┬─────┬─────┬───────┐\n │ foo ┆ bar ┆ ham ┆ apple │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str ┆ str │\n ╞═════╪═════╪═════╪═══════╡\n │ 1 ┆ 6 ┆ a ┆ x │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b ┆ y │\n └─────┴─────┴─────┴───────┘\n\n >>> df.join(other_df, on=\"ham\", how=\"outer\")\n shape: (4, 4)\n ┌──────┬──────┬─────┬───────┐\n │ foo ┆ bar ┆ ham ┆ apple │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ f64 ┆ str ┆ str │\n ╞══════╪══════╪═════╪═══════╡\n │ 1 ┆ 6 ┆ a ┆ x │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b ┆ y │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ null ┆ null ┆ d ┆ z │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c ┆ null │\n └──────┴──────┴─────┴───────┘\n\n **Asof join**\n This is similar to a left-join except that we match on nearest key rather than equal keys.\n The keys must be sorted to perform an asof join\n\n \"\"\"\n if how == \"cross\":\n return wrap_df(self._df.join(df._df, [], [], how, suffix))\n\n left_on_: Optional[List[Union[str, pli.Expr]]]\n if isinstance(left_on, (str, pli.Expr)):\n left_on_ = [left_on]\n else:\n left_on_ = left_on\n\n right_on_: Optional[List[Union[str, pli.Expr]]]\n if isinstance(right_on, (str, pli.Expr)):\n right_on_ = [right_on]\n else:\n right_on_ = right_on\n\n if isinstance(on, str):\n left_on_ = [on]\n right_on_ = [on]\n elif isinstance(on, list):\n left_on_ = on\n right_on_ = on\n\n if left_on_ is None or right_on_ is None:\n raise ValueError(\"You should pass the column to join on as an argument.\")\n\n if (\n isinstance(left_on_[0], pli.Expr)\n or isinstance(right_on_[0], pli.Expr)\n or asof_by_left is not None\n or asof_by_right is not None\n or asof_by is not None\n ):\n return (\n self.lazy()\n .join(\n df.lazy(),\n left_on,\n right_on,\n on=on,\n how=how,\n suffix=suffix,\n asof_by_right=asof_by_right,\n asof_by_left=asof_by_left,\n asof_by=asof_by,\n )\n .collect(no_optimization=True)\n )\n else:\n return wrap_df(self._df.join(df._df, left_on_, right_on_, how, suffix))\n\n def apply(\n self,\n f: Callable[[Tuple[Any, ...]], Any],\n return_dtype: Optional[Type[DataType]] = None,\n inference_size: int = 256,\n ) -> \"DataFrame\":\n \"\"\"\n Apply a custom function over the rows of the DataFrame. The rows are passed as tuple.\n\n Beware, this is slow.\n\n Parameters\n ----------\n f\n Custom function/ lambda function.\n return_dtype\n Output type of the operation. 
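As a rough sketch (the frame and column names are made up), each row is passed to the function as a tuple:\n\n >>> df = pl.DataFrame({\"a\": [1, 2], \"b\": [10, 20]})\n >>> df.apply(lambda row: row[0] + row[1]) # doctest: +SKIP\n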
If none given, Polars tries to infer the type.\n inference_size\n Only used in the case when the custom function returns rows.\n This uses the first `n` rows to determine the output schema\n\n \"\"\"\n out, is_df = self._df.apply(f, return_dtype, inference_size)\n if is_df:\n return wrap_df(out)\n else:\n return pli.wrap_s(out).to_frame()\n\n def with_column(self, column: Union[\"pli.Series\", \"pli.Expr\"]) -> \"DataFrame\":\n \"\"\"\n Return a new DataFrame with the column added or replaced.\n\n Parameters\n ----------\n column\n Series, where the name of the Series refers to the column in the DataFrame.\n \"\"\"\n if isinstance(column, pli.Expr):\n return self.with_columns([column])\n else:\n return wrap_df(self._df.with_column(column._s))\n\n def with_column_renamed(self, existing_name: str, new_name: str) -> \"DataFrame\":\n \"\"\"\n Return a new DataFrame with the column renamed.\n\n Parameters\n ----------\n existing_name\n new_name\n\n Examples\n --------\n >>> df = pl.DataFrame({\"a\": [1, 2], \"b\": [3, 4]})\n >>> df.with_column_renamed(\"b\", \"c\")\n shape: (2, 2)\n ┌─────┬─────┐\n │ a ┆ c │\n │ --- ┆ --- │\n │ i64 ┆ i64 │\n ╞═════╪═════╡\n │ 1 ┆ 3 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 4 │\n └─────┴─────┘\n\n \"\"\"\n return (\n self.lazy()\n .with_column_renamed(existing_name, new_name)\n .collect(no_optimization=True, string_cache=False)\n )\n\n def hstack(\n self, columns: Union[List[\"pli.Series\"], \"DataFrame\"], in_place: bool = False\n ) -> Optional[\"DataFrame\"]:\n \"\"\"\n Return a new DataFrame grown horizontally by stacking multiple Series to it.\n\n Parameters\n ----------\n columns\n Series to stack.\n in_place\n Modify in place.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> x = pl.Series(\"apple\", [10, 20, 30])\n >>> df.hstack([x])\n shape: (3, 4)\n ┌─────┬─────┬─────┬───────┐\n │ foo ┆ bar ┆ ham ┆ apple │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str ┆ i64 │\n ╞═════╪═════╪═════╪═══════╡\n │ 1 ┆ 6 ┆ a ┆ 10 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b ┆ 20 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c ┆ 30 │\n └─────┴─────┴─────┴───────┘\n\n \"\"\"\n if not isinstance(columns, list):\n columns = columns.get_columns()\n if in_place:\n self._df.hstack_mut([s.inner() for s in columns])\n return None\n else:\n return wrap_df(self._df.hstack([s.inner() for s in columns]))\n\n @overload\n def vstack(self, df: \"DataFrame\", in_place: Literal[True]) -> None:\n ...\n\n @overload\n def vstack(self, df: \"DataFrame\", in_place: Literal[False] = ...) -> \"DataFrame\":\n ...\n\n @overload\n def vstack(self, df: \"DataFrame\", in_place: bool) -> Optional[\"DataFrame\"]:\n ...\n\n def vstack(self, df: \"DataFrame\", in_place: bool = False) -> Optional[\"DataFrame\"]:\n \"\"\"\n Grow this DataFrame vertically by stacking a DataFrame to it.\n\n Parameters\n ----------\n df\n DataFrame to stack.\n in_place\n Modify in place\n\n Examples\n --------\n\n >>> df1 = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2],\n ... \"bar\": [6, 7],\n ... \"ham\": [\"a\", \"b\"],\n ... }\n ... )\n >>> df2 = pl.DataFrame(\n ... {\n ... \"foo\": [3, 4],\n ... \"bar\": [8, 9],\n ... \"ham\": [\"c\", \"d\"],\n ... }\n ... 
)\n >>> df1.vstack(df2)\n shape: (4, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 4 ┆ 9 ┆ d │\n └─────┴─────┴─────┘\n\n \"\"\"\n if in_place:\n self._df.vstack_mut(df._df)\n return None\n else:\n return wrap_df(self._df.vstack(df._df))\n\n def drop(self, name: Union[str, List[str]]) -> \"DataFrame\":\n \"\"\"\n Remove column from DataFrame and return as new.\n\n Parameters\n ----------\n name\n Column(s) to drop.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6.0, 7.0, 8.0],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.drop(\"ham\")\n shape: (3, 2)\n ┌─────┬─────┐\n │ foo ┆ bar │\n │ --- ┆ --- │\n │ i64 ┆ f64 │\n ╞═════╪═════╡\n │ 1 ┆ 6 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ 3 ┆ 8 │\n └─────┴─────┘\n\n \"\"\"\n if isinstance(name, list):\n df = self.clone()\n\n for name in name:\n df._df.drop_in_place(name)\n return df\n\n return wrap_df(self._df.drop(name))\n\n def drop_in_place(self, name: str) -> \"pli.Series\":\n \"\"\"\n Drop in place.\n\n Parameters\n ----------\n name\n Column to drop.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.drop_in_place(\"ham\")\n shape: (3,)\n Series: 'ham' [str]\n [\n \"a\"\n \"b\"\n \"c\"\n ]\n\n \"\"\"\n return pli.wrap_s(self._df.drop_in_place(name))\n\n def select_at_idx(self, idx: int) -> \"pli.Series\":\n \"\"\"\n Select column at index location.\n\n Parameters\n ----------\n idx\n Location of selection.\n\n .. deprecated:: 0.10.20\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... 
)\n >>> df.select_at_idx(1)\n shape: (3,)\n Series: 'bar' [i64]\n [\n 6\n 7\n 8\n ]\n\n \"\"\"\n return pli.wrap_s(self._df.select_at_idx(idx))\n\n def clone(self) -> \"DataFrame\":\n \"\"\"\n Very cheap deep clone.\n \"\"\"\n return wrap_df(self._df.clone())\n\n def __copy__(self) -> \"DataFrame\":\n return self.clone()\n\n def __deepcopy__(self, memodict={}) -> \"DataFrame\": # type: ignore\n return self.clone()\n\n def get_columns(self) -> List[\"pli.Series\"]:\n \"\"\"\n Get the DataFrame as a List of Series.\n \"\"\"\n return list(map(lambda s: pli.wrap_s(s), self._df.get_columns()))\n\n def get_column(self, name: str) -> \"pli.Series\":\n \"\"\"\n Get a single column as Series by name.\n \"\"\"\n return self[name]\n\n def fill_null(self, strategy: Union[str, \"pli.Expr\", Any]) -> \"DataFrame\":\n \"\"\"\n Fill None/missing values by a filling strategy or an Expression evaluation.\n\n Parameters\n ----------\n strategy\n One of:\n - \"backward\"\n - \"forward\"\n - \"mean\"\n - \"min'\n - \"max\"\n - \"zero\"\n - \"one\"\n Or an expression.\n\n Returns\n -------\n DataFrame with None replaced with the filling strategy.\n \"\"\"\n if isinstance(strategy, pli.Expr):\n return self.lazy().fill_null(strategy).collect(no_optimization=True)\n if not isinstance(strategy, str):\n return self.fill_null(pli.lit(strategy))\n return wrap_df(self._df.fill_null(strategy))\n\n def fill_nan(self, fill_value: Union[\"pli.Expr\", int, float]) -> \"DataFrame\":\n \"\"\"\n Fill None/missing values by a an Expression evaluation.\n\n Warnings\n --------\n NOTE that floating point NaN (No a Number) are not missing values!\n to replace missing values, use `fill_null`.\n\n Parameters\n ----------\n fill_value\n value to fill NaN with\n\n Returns\n -------\n DataFrame with NaN replaced with fill_value\n \"\"\"\n return self.lazy().fill_nan(fill_value).collect(no_optimization=True)\n\n def explode(\n self, columns: Union[str, List[str], \"pli.Expr\", List[\"pli.Expr\"]]\n ) -> \"DataFrame\":\n \"\"\"\n Explode `DataFrame` to long format by exploding a column with Lists.\n\n Parameters\n ----------\n columns\n Column of LargeList type.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"letters\": [\"c\", \"c\", \"a\", \"c\", \"a\", \"b\"],\n ... \"nrs\": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]],\n ... }\n ... )\n >>> df\n shape: (6, 2)\n ┌─────────┬────────────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ list [i64] │\n ╞═════════╪════════════╡\n │ c ┆ [1, 2] │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ c ┆ [1, 3] │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ a ┆ [4, 3] │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ c ┆ [5, 5, 5] │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ a ┆ [6] │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ b ┆ [2, 1, 2] │\n └─────────┴────────────┘\n >>> df.explode(\"nrs\")\n shape: (13, 2)\n ┌─────────┬─────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════════╪═════╡\n │ c ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ ... ┆ ... 
│\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 6 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 2 │\n └─────────┴─────┘\n\n \"\"\"\n return self.lazy().explode(columns).collect(no_optimization=True)\n\n def melt(\n self, id_vars: Union[List[str], str], value_vars: Union[List[str], str]\n ) -> \"DataFrame\":\n \"\"\"\n Unpivot DataFrame to long format.\n\n Parameters\n ----------\n id_vars\n Columns to use as identifier variables.\n\n value_vars\n Values to use as identifier variables.\n\n Returns\n -------\n\n \"\"\"\n if isinstance(value_vars, str):\n value_vars = [value_vars]\n if isinstance(id_vars, str):\n id_vars = [id_vars]\n return wrap_df(self._df.melt(id_vars, value_vars))\n\n def shift(self, periods: int) -> \"DataFrame\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with `Nones`.\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.shift(periods=1)\n shape: (3, 3)\n ┌──────┬──────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞══════╪══════╪══════╡\n │ null ┆ null ┆ null │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n └──────┴──────┴──────┘\n >>> df.shift(periods=-1)\n shape: (3, 3)\n ┌──────┬──────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞══════╪══════╪══════╡\n │ 2 ┆ 7 ┆ b │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 3 ┆ 8 ┆ c │\n ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ null ┆ null ┆ null │\n └──────┴──────┴──────┘\n\n \"\"\"\n return wrap_df(self._df.shift(periods))\n\n def shift_and_fill(\n self, periods: int, fill_value: Union[int, str, float]\n ) -> \"DataFrame\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with the result of the `fill_value` expression.\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n fill_value\n fill None values with this value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.shift_and_fill(periods=1, fill_value=0)\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 0 ┆ 0 ┆ 0 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 1 ┆ 6 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n └─────┴─────┴─────┘\n\n \"\"\"\n return (\n self.lazy()\n .shift_and_fill(periods, fill_value)\n .collect(no_optimization=True, string_cache=False)\n )\n\n def is_duplicated(self) -> \"pli.Series\":\n \"\"\"\n Get a mask of all duplicated rows in this DataFrame.\n \"\"\"\n return pli.wrap_s(self._df.is_duplicated())\n\n def is_unique(self) -> \"pli.Series\":\n \"\"\"\n Get a mask of all unique rows in this DataFrame.\n \"\"\"\n return pli.wrap_s(self._df.is_unique())\n\n def lazy(self) -> \"pli.LazyFrame\":\n \"\"\"\n Start a lazy query from this point. 
This returns a `LazyFrame` object.\n\n Operations on a `LazyFrame` are not executed until this is requested by either calling:\n\n * `.fetch()` (run on a small number of rows)\n * `.collect()` (run on all data)\n * `.describe_plan()` (print unoptimized query plan)\n * `.describe_optimized_plan()` (print optimized query plan)\n * `.show_graph()` (show (un)optimized query plan) as graphiz graph)\n\n Lazy operations are advised because they allow for query optimization and more parallelization.\n \"\"\"\n return pli.wrap_ldf(self._df.lazy())\n\n def select(\n self,\n exprs: Union[\n str,\n \"pli.Expr\",\n Sequence[Union[str, \"pli.Expr\"]],\n Sequence[bool],\n Sequence[int],\n Sequence[float],\n \"pli.Series\",\n ],\n ) -> \"DataFrame\":\n \"\"\"\n Select columns from this DataFrame.\n\n Parameters\n ----------\n exprs\n Column or columns to select.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.select(\"foo\")\n shape: (3, 1)\n ┌─────┐\n │ foo │\n │ --- │\n │ i64 │\n ╞═════╡\n │ 1 │\n ├╌╌╌╌╌┤\n │ 2 │\n ├╌╌╌╌╌┤\n │ 3 │\n └─────┘\n\n \"\"\"\n return (\n self.lazy().select(exprs).collect(no_optimization=True, string_cache=False) # type: ignore\n )\n\n def with_columns(self, exprs: Union[\"pli.Expr\", List[\"pli.Expr\"]]) -> \"DataFrame\":\n \"\"\"\n Add or overwrite multiple columns in a DataFrame.\n\n Parameters\n ----------\n exprs\n List of Expressions that evaluate to columns.\n \"\"\"\n if not isinstance(exprs, list):\n exprs = [exprs]\n return (\n self.lazy()\n .with_columns(exprs)\n .collect(no_optimization=True, string_cache=False)\n )\n\n def n_chunks(self) -> int:\n \"\"\"\n Get number of chunks used by the ChunkedArrays of this DataFrame.\n \"\"\"\n return self._df.n_chunks()\n\n @overload\n def max(self, axis: Literal[0] = ...) -> \"DataFrame\":\n ...\n\n @overload\n def max(self, axis: Literal[1]) -> \"pli.Series\":\n ...\n\n @overload\n def max(self, axis: int = 0) -> Union[\"DataFrame\", \"pli.Series\"]:\n ...\n\n def max(self, axis: int = 0) -> Union[\"DataFrame\", \"pli.Series\"]:\n \"\"\"\n Aggregate the columns of this DataFrame to their maximum value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.max()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 3 ┆ 8 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n if axis == 0:\n return wrap_df(self._df.max())\n if axis == 1:\n return pli.wrap_s(self._df.hmax())\n raise ValueError(\"Axis should be 0 or 1.\") # pragma: no cover\n\n @overload\n def min(self, axis: Literal[0] = ...) -> \"DataFrame\":\n ...\n\n @overload\n def min(self, axis: Literal[1]) -> \"pli.Series\":\n ...\n\n @overload\n def min(self, axis: int = 0) -> Union[\"DataFrame\", \"pli.Series\"]:\n ...\n\n def min(self, axis: int = 0) -> Union[\"DataFrame\", \"pli.Series\"]:\n \"\"\"\n Aggregate the columns of this DataFrame to their minimum value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... 
)\n >>> df.min()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 1 ┆ 6 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n if axis == 0:\n return wrap_df(self._df.min())\n if axis == 1:\n return pli.wrap_s(self._df.hmin())\n raise ValueError(\"Axis should be 0 or 1.\") # pragma: no cover\n\n @overload\n def sum(\n self, *, axis: Literal[0] = ..., null_strategy: str = \"ignore\"\n ) -> \"DataFrame\":\n ...\n\n @overload\n def sum(self, *, axis: Literal[1], null_strategy: str = \"ignore\") -> \"pli.Series\":\n ...\n\n @overload\n def sum(\n self, *, axis: int = 0, null_strategy: str = \"ignore\"\n ) -> Union[\"DataFrame\", \"pli.Series\"]:\n ...\n\n def sum(\n self, *, axis: int = 0, null_strategy: str = \"ignore\"\n ) -> Union[\"DataFrame\", \"pli.Series\"]:\n \"\"\"\n Aggregate the columns of this DataFrame to their sum value.\n\n Parameters\n ----------\n axis\n either 0 or 1\n null_strategy\n {'ignore', 'propagate'}\n this argument is only used if axis == 1\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.sum()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 6 ┆ 21 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n if axis == 0:\n return wrap_df(self._df.sum())\n if axis == 1:\n return pli.wrap_s(self._df.hsum(null_strategy))\n raise ValueError(\"Axis should be 0 or 1.\") # pragma: no cover\n\n @overload\n def mean(\n self, *, axis: Literal[0] = ..., null_strategy: str = \"ignore\"\n ) -> \"DataFrame\":\n ...\n\n @overload\n def mean(self, *, axis: Literal[1], null_strategy: str = \"ignore\") -> \"pli.Series\":\n ...\n\n @overload\n def mean(\n self, *, axis: int = 0, null_strategy: str = \"ignore\"\n ) -> Union[\"DataFrame\", \"pli.Series\"]:\n ...\n\n def mean(\n self, axis: int = 0, null_strategy: str = \"ignore\"\n ) -> Union[\"DataFrame\", \"pli.Series\"]:\n \"\"\"\n Aggregate the columns of this DataFrame to their mean value.\n\n Parameters\n ----------\n axis\n either 0 or 1\n null_strategy\n {'ignore', 'propagate'}\n this argument is only used if axis == 1\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.mean()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ f64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 2 ┆ 7 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n if axis == 0:\n return wrap_df(self._df.mean())\n if axis == 1:\n return pli.wrap_s(self._df.hmean(null_strategy))\n raise ValueError(\"Axis should be 0 or 1.\") # pragma: no cover\n\n def std(self) -> \"DataFrame\":\n \"\"\"\n Aggregate the columns of this DataFrame to their standard deviation value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.std()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ f64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 1 ┆ 1 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n return wrap_df(self._df.std())\n\n def var(self) -> \"DataFrame\":\n \"\"\"\n Aggregate the columns of this DataFrame to their variance value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... 
\"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.var()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ f64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 1 ┆ 1 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n return wrap_df(self._df.var())\n\n def median(self) -> \"DataFrame\":\n \"\"\"\n Aggregate the columns of this DataFrame to their median value.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.median()\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ f64 ┆ f64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 2 ┆ 7 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n return wrap_df(self._df.median())\n\n def quantile(self, quantile: float, interpolation: str = \"nearest\") -> \"DataFrame\":\n \"\"\"\n Aggregate the columns of this DataFrame to their quantile value.\n\n Parameters\n ----------\n quantile\n quantile between 0.0 and 1.0\n\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.quantile(0.5, \"nearest\")\n shape: (1, 3)\n ┌─────┬─────┬──────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪══════╡\n │ 2 ┆ 7 ┆ null │\n └─────┴─────┴──────┘\n\n \"\"\"\n return wrap_df(self._df.quantile(quantile, interpolation))\n\n def to_dummies(self) -> \"DataFrame\":\n \"\"\"\n Get one hot encoded dummy variables.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.to_dummies()\n shape: (3, 9)\n ┌───────┬───────┬───────┬───────┬─────┬───────┬───────┬───────┬───────┐\n │ foo_1 ┆ foo_2 ┆ foo_3 ┆ bar_6 ┆ ... ┆ bar_8 ┆ ham_a ┆ ham_b ┆ ham_c │\n │ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │\n │ u8 ┆ u8 ┆ u8 ┆ u8 ┆ ┆ u8 ┆ u8 ┆ u8 ┆ u8 │\n ╞═══════╪═══════╪═══════╪═══════╪═════╪═══════╪═══════╪═══════╪═══════╡\n │ 1 ┆ 0 ┆ 0 ┆ 1 ┆ ... ┆ 0 ┆ 1 ┆ 0 ┆ 0 │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 0 ┆ 1 ┆ 0 ┆ 0 ┆ ... ┆ 0 ┆ 0 ┆ 1 ┆ 0 │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ 0 ┆ 0 ┆ 1 ┆ 0 ┆ ... ┆ 1 ┆ 0 ┆ 0 ┆ 1 │\n └───────┴───────┴───────┴───────┴─────┴───────┴───────┴───────┴───────┘\n\n \"\"\"\n return wrap_df(self._df.to_dummies())\n\n def drop_duplicates(\n self,\n maintain_order: bool = True,\n subset: Optional[Union[str, List[str]]] = None,\n ) -> \"DataFrame\":\n \"\"\"\n Drop duplicate rows from this DataFrame.\n Note that this fails if there is a column of type `List` in the DataFrame.\n \"\"\"\n if subset is not None and not isinstance(subset, list):\n subset = [subset]\n return wrap_df(self._df.drop_duplicates(maintain_order, subset))\n\n def rechunk(self) -> \"DataFrame\":\n \"\"\"\n Rechunk the data in this DataFrame to a contiguous allocation.\n\n This will make sure all subsequent operations have optimal and predictable performance.\n \"\"\"\n return wrap_df(self._df.rechunk())\n\n def null_count(self) -> \"DataFrame\":\n \"\"\"\n Create a new DataFrame that shows the null counts per column.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, None, 3],\n ... \"bar\": [6, 7, None],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... 
}\n ... )\n >>> df.null_count()\n shape: (1, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ u32 ┆ u32 ┆ u32 │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 1 ┆ 0 │\n └─────┴─────┴─────┘\n\n \"\"\"\n return wrap_df(self._df.null_count())\n\n def sample(\n self,\n n: Optional[int] = None,\n frac: Optional[float] = None,\n with_replacement: bool = False,\n seed: int = 0,\n ) -> \"DataFrame\":\n \"\"\"\n Sample from this DataFrame by setting either `n` or `frac`.\n\n Parameters\n ----------\n n\n Number of samples < self.len() .\n frac\n Fraction between 0.0 and 1.0 .\n with_replacement\n Sample with replacement.\n seed\n Initialization seed\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.sample(n=2) # doctest: +IGNORE_RESULT\n shape: (2, 3)\n ┌─────┬─────┬─────┐\n │ foo ┆ bar ┆ ham │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ i64 ┆ str │\n ╞═════╪═════╪═════╡\n │ 3 ┆ 8 ┆ c │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 2 ┆ 7 ┆ b │\n └─────┴─────┴─────┘\n\n \"\"\"\n if n is not None:\n return wrap_df(self._df.sample_n(n, with_replacement, seed))\n return wrap_df(self._df.sample_frac(frac, with_replacement, seed))\n\n def fold(\n self, operation: Callable[[\"pli.Series\", \"pli.Series\"], \"pli.Series\"]\n ) -> \"pli.Series\":\n \"\"\"\n Apply a horizontal reduction on a DataFrame. This can be used to effectively\n determine aggregations on a row level, and can be applied to any DataType that\n can be supercasted (casted to a similar parent type).\n\n An example of the supercast rules when applying an arithmetic operation on two DataTypes are for instance:\n\n Int8 + Utf8 = Utf8\n Float32 + Int64 = Float32\n Float32 + Float64 = Float64\n\n Examples\n --------\n A horizontal sum operation:\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [2, 1, 3],\n ... \"b\": [1, 2, 3],\n ... \"c\": [1.0, 2.0, 3.0],\n ... }\n ... )\n >>> df.fold(lambda s1, s2: s1 + s2)\n shape: (3,)\n Series: 'a' [f64]\n [\n 4\n 5\n 9\n ]\n\n A horizontal minimum operation:\n\n >>> df = pl.DataFrame({\"a\": [2, 1, 3], \"b\": [1, 2, 3], \"c\": [1.0, 2.0, 3.0]})\n >>> df.fold(lambda s1, s2: s1.zip_with(s1 < s2, s2))\n shape: (3,)\n Series: 'a' [f64]\n [\n 1\n 1\n 3\n ]\n\n A horizontal string concattenation:\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [\"foo\", \"bar\", 2],\n ... \"b\": [1, 2, 3],\n ... \"c\": [1.0, 2.0, 3.0],\n ... }\n ... )\n >>> df.fold(lambda s1, s2: s1 + s2)\n shape: (3,)\n Series: 'a' [str]\n [\n \"foo11.0\"\n \"bar22.0\"\n null\n ]\n\n Parameters\n ----------\n operation\n function that takes two `Series` and returns a `Series`.\n\n \"\"\"\n acc = self.to_series(0)\n\n for i in range(1, self.width):\n acc = operation(acc, self.to_series(i))\n return acc\n\n def row(self, index: int) -> Tuple[Any]:\n \"\"\"\n Get a row as tuple.\n\n Parameters\n ----------\n index\n Row index.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.row(2)\n (3, 8, 'c')\n\n \"\"\"\n return self._df.row_tuple(index)\n\n def rows(self) -> List[Tuple]:\n \"\"\"\n Convert columnar data to rows as python tuples.\n \"\"\"\n return self._df.row_tuples()\n\n @overload\n def shrink_to_fit(self, in_place: Literal[False] = ...) 
-> \"DataFrame\":\n ...\n\n @overload\n def shrink_to_fit(self, in_place: Literal[True]) -> None:\n ...\n\n @overload\n def shrink_to_fit(self, in_place: bool) -> Optional[\"DataFrame\"]:\n ...\n\n def shrink_to_fit(self, in_place: bool = False) -> Optional[\"DataFrame\"]:\n \"\"\"\n Shrink memory usage of this DataFrame to fit the exact capacity needed to hold the data.\n \"\"\"\n if in_place:\n self._df.shrink_to_fit()\n return None\n else:\n df = self.clone()\n df._df.shrink_to_fit()\n return df\n\n def hash_rows(\n self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3\n ) -> \"pli.Series\":\n \"\"\"\n Hash and combine the rows in this DataFrame.\n\n Hash value is UInt64\n\n Parameters\n ----------\n k0\n seed parameter\n k1\n seed parameter\n k2\n seed parameter\n k3\n seed parameter\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\n ... \"foo\": [1, 2, 3],\n ... \"bar\": [6, 7, 8],\n ... \"ham\": [\"a\", \"b\", \"c\"],\n ... }\n ... )\n >>> df.hash(k0=42) # doctest: +SKIP\n shape: (3,)\n Series: '' [u64]\n [\n 1208206736888326229\n 8040480609798856146\n 18282897888575762835\n ]\n \"\"\"\n return pli.wrap_s(self._df.hash_rows(k0, k1, k2, k3))\n\n def interpolate(self) -> \"DataFrame\":\n \"\"\"\n Interpolate intermediate values. The interpolation method is linear.\n \"\"\"\n return self.select(pli.col(\"*\").interpolate())\n\n def is_empty(self) -> bool:\n \"\"\"\n Check if the dataframe is empty\n \"\"\"\n return self.height == 0\n\n\nclass DynamicGroupBy:\n \"\"\"\n A dynamic grouper. This has an `.agg` method which will allow you to run all polars expressions\n in a groupby context.\n \"\"\"\n\n def __init__(\n self,\n df: \"DataFrame\",\n time_column: str,\n every: str,\n period: Optional[str],\n offset: Optional[str],\n truncate: bool = True,\n include_boundaries: bool = True,\n closed: str = \"none\",\n by: Optional[Union[str, List[str], \"pli.Expr\", List[\"pli.Expr\"]]] = None,\n ):\n self.df = df\n self.time_column = time_column\n self.every = every\n self.period = period\n self.offset = offset\n self.truncate = truncate\n self.include_boundaries = include_boundaries\n self.closed = closed\n self.by = by\n\n def agg(\n self,\n column_to_agg: Union[\n List[Tuple[str, List[str]]],\n Dict[str, Union[str, List[str]]],\n List[\"pli.Expr\"],\n \"pli.Expr\",\n ],\n ) -> DataFrame:\n return (\n self.df.lazy()\n .groupby_dynamic(\n self.time_column,\n self.every,\n self.period,\n self.offset,\n self.truncate,\n self.include_boundaries,\n self.closed,\n self.by,\n )\n .agg(column_to_agg) # type: ignore[arg-type]\n .collect(no_optimization=True, string_cache=False)\n )\n\n\nclass GroupBy:\n \"\"\"\n Starts a new GroupBy operation.\n\n You can also loop over this Object to loop over `DataFrames` with unique groups.\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"foo\": [\"a\", \"a\", \"b\"], \"bar\": [1, 2, 3]})\n >>> for group in df.groupby(\"foo\"):\n ... print(group)\n ... 
# doctest: +IGNORE_RESULT\n ...\n shape: (2, 2)\n ┌─────┬─────┐\n │ foo ┆ bar │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═════╡\n │ a ┆ 1 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 2 │\n └─────┴─────┘\n shape: (1, 2)\n ┌─────┬─────┐\n │ foo ┆ bar │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═════╡\n │ b ┆ 3 │\n └─────┴─────┘\n\n \"\"\"\n\n def __init__(\n self,\n df: \"PyDataFrame\",\n by: Union[str, List[str]],\n maintain_order: bool = False,\n ):\n self._df = df\n self.by = by\n self.maintain_order = maintain_order\n\n def __getitem__(self, item: Any) -> \"GBSelection\":\n return self._select(item)\n\n def _select(self, columns: Union[str, List[str]]) -> \"GBSelection\":\n \"\"\"\n Select the columns that will be aggregated.\n\n Parameters\n ----------\n columns\n One or multiple columns.\n \"\"\"\n if isinstance(columns, str):\n columns = [columns]\n return GBSelection(self._df, self.by, columns)\n\n def __iter__(self) -> Iterable[Any]:\n groups_df = self.groups()\n groups = groups_df[\"groups\"]\n df = wrap_df(self._df)\n for i in range(groups_df.height):\n yield df[groups[i]]\n\n def get_group(self, group_value: Union[Any, Tuple[Any]]) -> DataFrame:\n \"\"\"\n Select a single group as a new DataFrame.\n\n Parameters\n ----------\n group_value\n Group to select.\n \"\"\"\n groups_df = self.groups()\n groups = groups_df[\"groups\"]\n\n if not isinstance(group_value, list):\n group_value = [group_value]\n\n by = self.by\n if not isinstance(by, list):\n by = [by]\n\n mask = None\n for column, group_val in zip(by, group_value):\n local_mask = groups_df[column] == group_val\n if mask is None:\n mask = local_mask\n else:\n mask = mask & local_mask\n\n # should be only one match\n try:\n groups_idx = groups[mask][0] # type: ignore\n except IndexError:\n raise ValueError(f\"no group: {group_value} found\")\n\n df = wrap_df(self._df)\n return df[groups_idx]\n\n def groups(self) -> DataFrame:\n \"\"\"\n Return a `DataFrame` with:\n\n * the groupby keys\n * the group indexes aggregated as lists\n \"\"\"\n return wrap_df(self._df.groupby(self.by, None, \"groups\"))\n\n def apply(self, f: Callable[[DataFrame], DataFrame]) -> DataFrame:\n \"\"\"\n Apply a function over the groups as a sub-DataFrame.\n\n Beware, this is slow.\n\n Parameters\n ----------\n f\n Custom function.\n\n Returns\n -------\n DataFrame\n \"\"\"\n return wrap_df(self._df.groupby_apply(self.by, f))\n\n def agg(\n self,\n column_to_agg: Union[\n List[Tuple[str, List[str]]],\n Dict[str, Union[str, List[str]]],\n List[\"pli.Expr\"],\n \"pli.Expr\",\n ],\n ) -> DataFrame:\n \"\"\"\n Use multiple aggregations on columns. This can be combined with complete lazy API\n and is considered idiomatic polars.\n\n Parameters\n ----------\n column_to_agg\n map column to aggregation functions.\n\n Use lazy API syntax (recommended)\n\n >>> [pl.col(\"foo\").sum(), pl.col(\"bar\").min()] # doctest: +SKIP\n\n >>> [\n ... (\"foo\", [\"sum\", \"n_unique\", \"min\"]),\n ... (\"bar\", [\"max\"]),\n ... ] # doctest: +SKIP\n\n Column name to aggregation with dict:\n >>> {\"foo\": [\"sum\", \"n_unique\", \"min\"], \"bar\": \"max\"} # doctest: +SKIP\n\n Returns\n -------\n Result of groupby split apply operations.\n\n\n Examples\n --------\n\n Use lazy API:\n\n >>> df.groupby([\"foo\", \"bar\"]).agg(\n ... [\n ... pl.sum(\"ham\"),\n ... pl.col(\"spam\").tail(4).sum(),\n ... ]\n ... ) # doctest: +SKIP\n\n Use a dict:\n\n >>> df.groupby([\"foo\", \"bar\"]).agg(\n ... {\n ... \"spam\": [\"sum\", \"min\"],\n ... }\n ... 
) # doctest: +SKIP\n shape: (3, 2)\n ┌─────┬─────┐\n │ foo ┆ bar │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═════╡\n │ a ┆ 1 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 3 │\n └─────┴─────┘\n\n \"\"\"\n\n # a single list comprehension would be cleaner, but mypy complains on different\n # lines for py3.7 vs py3.10 about typing errors, so this is the same logic,\n # but broken down into two small functions\n def _str_to_list(y: Any) -> Any:\n return [y] if isinstance(y, str) else y\n\n def _wrangle(x: Any) -> list:\n return [(xi[0], _str_to_list(xi[1])) for xi in x]\n\n if isinstance(column_to_agg, pli.Expr):\n column_to_agg = [column_to_agg]\n if isinstance(column_to_agg, dict):\n column_to_agg = _wrangle(column_to_agg.items())\n elif isinstance(column_to_agg, list):\n\n if isinstance(column_to_agg[0], tuple):\n column_to_agg = _wrangle(column_to_agg)\n\n elif isinstance(column_to_agg[0], pli.Expr):\n return (\n wrap_df(self._df)\n .lazy()\n .groupby(self.by, maintain_order=self.maintain_order)\n .agg(column_to_agg) # type: ignore[arg-type]\n .collect(no_optimization=True, string_cache=False)\n )\n\n pass\n else:\n raise ValueError(\n f\"argument: {column_to_agg} not understood, have you passed a list of expressions?\"\n )\n else:\n raise ValueError(\n f\"argument: {column_to_agg} not understood, have you passed a list of expressions?\"\n )\n\n return wrap_df(self._df.groupby_agg(self.by, column_to_agg))\n\n def head(self, n: int = 5) -> DataFrame:\n \"\"\"\n Return first n rows of each group.\n\n Parameters\n ----------\n n\n Number of values of the group to select\n\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"letters\": [\"c\", \"c\", \"a\", \"c\", \"a\", \"b\"],\n ... \"nrs\": [1, 2, 3, 4, 5, 6],\n ... }\n ... )\n >>> df\n shape: (6, 2)\n ┌─────────┬─────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════════╪═════╡\n │ c ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 4 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 6 │\n └─────────┴─────┘\n >>> df.groupby(\"letters\").head(2).sort(\"letters\")\n shape: (5, 2)\n ┌─────────┬─────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════════╪═════╡\n │ a ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 6 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n └─────────┴─────┘\n\n \"\"\"\n return (\n wrap_df(self._df)\n .lazy()\n .groupby(self.by, self.maintain_order)\n .head(n)\n .collect(no_optimization=True, string_cache=False)\n )\n\n def tail(self, n: int = 5) -> DataFrame:\n \"\"\"\n Return last n rows of each group.\n\n Parameters\n ----------\n n\n Number of values of the group to select\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"letters\": [\"c\", \"c\", \"a\", \"c\", \"a\", \"b\"],\n ... \"nrs\": [1, 2, 3, 4, 5, 6],\n ... }\n ... 
)\n >>> df\n shape: (6, 2)\n ┌─────────┬─────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════════╪═════╡\n │ c ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 4 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 6 │\n └─────────┴─────┘\n >>> (df.groupby(\"letters\").tail(2).sort(\"letters\"))\n shape: (5, 2)\n ┌─────────┬─────┐\n │ letters ┆ nrs │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════════╪═════╡\n │ a ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ a ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 6 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 4 │\n └─────────┴─────┘\n\n \"\"\"\n return (\n wrap_df(self._df)\n .lazy()\n .groupby(self.by, self.maintain_order)\n .tail(n)\n .collect(no_optimization=True, string_cache=False)\n )\n\n def _select_all(self) -> \"GBSelection\":\n \"\"\"\n Select all columns for aggregation.\n \"\"\"\n return GBSelection(self._df, self.by, None)\n\n def pivot(self, pivot_column: str, values_column: str) -> \"PivotOps\":\n \"\"\"\n Do a pivot operation based on the group key, a pivot column and an aggregation function on the values column.\n\n Parameters\n ----------\n pivot_column\n Column to pivot.\n values_column\n Column that will be aggregated.\n \"\"\"\n return PivotOps(self._df, self.by, pivot_column, values_column)\n\n def first(self) -> DataFrame:\n \"\"\"\n Aggregate the first values in the group.\n \"\"\"\n return self.agg(pli.all().first())\n\n def last(self) -> DataFrame:\n \"\"\"\n Aggregate the last values in the group.\n \"\"\"\n return self.agg(pli.all().last())\n\n def sum(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the sum.\n \"\"\"\n return self.agg(pli.all().sum())\n\n def min(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the minimal value.\n \"\"\"\n return self.agg(pli.all().min())\n\n def max(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the maximal value.\n \"\"\"\n return self.agg(pli.all().max())\n\n def count(self) -> DataFrame:\n \"\"\"\n Count the number of values in each group.\n \"\"\"\n return self.agg(pli.all().count())\n\n def mean(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the mean values.\n \"\"\"\n return self.agg(pli.all().mean())\n\n def n_unique(self) -> DataFrame:\n \"\"\"\n Count the unique values per group.\n \"\"\"\n return self.agg(pli.all().n_unique())\n\n def quantile(self, quantile: float, interpolation: str = \"nearest\") -> DataFrame:\n \"\"\"\n Compute the quantile per group.\n\n Parameters\n ----------\n quantile\n quantile between 0.0 and 1.0\n\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n\n \"\"\"\n return self.agg(pli.all().quantile(quantile, interpolation))\n\n def median(self) -> DataFrame:\n \"\"\"\n Return the median per group.\n \"\"\"\n return self.agg(pli.all().median())\n\n def agg_list(self) -> DataFrame:\n \"\"\"\n Aggregate the groups into Series.\n \"\"\"\n return self.agg(pli.all().list())\n\n\nclass PivotOps:\n \"\"\"\n Utility class returned in a pivot operation.\n \"\"\"\n\n def __init__(\n self,\n df: DataFrame,\n by: Union[str, List[str]],\n pivot_column: str,\n values_column: str,\n ):\n self._df = df\n self.by = by\n self.pivot_column = pivot_column\n self.values_column = values_column\n\n def first(self) -> DataFrame:\n \"\"\"\n Get the first value per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"first\")\n )\n\n def sum(self) -> DataFrame:\n \"\"\"\n Get the sum per group.\n \"\"\"\n 
return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"sum\")\n )\n\n def min(self) -> DataFrame:\n \"\"\"\n Get the minimal value per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"min\")\n )\n\n def max(self) -> DataFrame:\n \"\"\"\n Get the maximal value per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"max\")\n )\n\n def mean(self) -> DataFrame:\n \"\"\"\n Get the mean value per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"mean\")\n )\n\n def count(self) -> DataFrame:\n \"\"\"\n Count the values per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"count\")\n )\n\n def median(self) -> DataFrame:\n \"\"\"\n Get the median value per group.\n \"\"\"\n return wrap_df(\n self._df.pivot(self.by, self.pivot_column, self.values_column, \"median\")\n )\n\n\nclass GBSelection:\n \"\"\"\n Utility class returned in a groupby operation.\n \"\"\"\n\n def __init__(\n self,\n df: \"PyDataFrame\",\n by: Union[str, List[str]],\n selection: Optional[List[str]],\n ):\n self._df = df\n self.by = by\n self.selection = selection\n\n def first(self) -> DataFrame:\n \"\"\"\n Aggregate the first values in the group.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"first\"))\n\n def last(self) -> DataFrame:\n \"\"\"\n Aggregate the last values in the group.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"last\"))\n\n def sum(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the sum.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"sum\"))\n\n def min(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the minimal value.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"min\"))\n\n def max(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the maximal value.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"max\"))\n\n def count(self) -> DataFrame:\n \"\"\"\n Count the number of values in each group.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"count\"))\n\n def mean(self) -> DataFrame:\n \"\"\"\n Reduce the groups to the mean values.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"mean\"))\n\n def n_unique(self) -> DataFrame:\n \"\"\"\n Count the unique values per group.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"n_unique\"))\n\n def quantile(self, quantile: float, interpolation: str = \"nearest\") -> DataFrame:\n \"\"\"\n Compute the quantile per group.\n\n Parameters\n ----------\n quantile\n quantile between 0.0 and 1.0\n\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n\n \"\"\"\n return wrap_df(\n self._df.groupby_quantile(self.by, self.selection, quantile, interpolation)\n )\n\n def median(self) -> DataFrame:\n \"\"\"\n Return the median per group.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"median\"))\n\n def agg_list(self) -> DataFrame:\n \"\"\"\n Aggregate the groups into Series.\n \"\"\"\n return wrap_df(self._df.groupby(self.by, self.selection, \"agg_list\"))\n\n def apply(\n self,\n func: Callable[[Any], Any],\n return_dtype: Optional[Type[DataType]] = None,\n ) -> DataFrame:\n \"\"\"\n Apply a function over the groups.\n \"\"\"\n df = self.agg_list()\n if self.selection is None:\n raise TypeError(\n 
\"apply not available for Groupby.select_all(). Use select() instead.\"\n )\n for name in self.selection:\n s = df.drop_in_place(name + \"_agg_list\").apply(func, return_dtype)\n s.rename(name, in_place=True)\n df[name] = s\n\n return df\n"
] |
[
[
"numpy.array"
]
] |
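The row above closes the polars `frame.py` excerpt, whose docstrings show `df.join(other_df, on="ham")` in both inner and outer form. As a quick aid when skimming the dump, here is a minimal standalone sketch of that docstring example (assuming a `polars` version contemporary with the excerpt, where `how="outer"` is still the accepted name for a full join):

```python
import polars as pl

# Frames mirroring the join docstring in the excerpt above.
df = pl.DataFrame({"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]})
other_df = pl.DataFrame({"apple": ["x", "y", "z"], "ham": ["a", "b", "d"]})

# Inner join on the shared "ham" key: only rows whose ham appears in both frames survive.
print(df.join(other_df, on="ham"))

# Outer join keeps keys from either frame; unmatched sides are filled with nulls.
print(df.join(other_df, on="ham", how="outer"))
```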
SkafteNicki/Deep_LMNN
|
[
"e70b495befdc6f6f1b24029e470c42a3b3821a93"
] |
[
"dlmnn/helper/layers.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 19 02:40:18 2018\n\n@author: nsde\n\"\"\"\n\n#%% easy access to keras layers\nfrom tensorflow.python.keras.layers import Conv2D\nfrom tensorflow.python.keras.layers import MaxPool2D\nfrom tensorflow.python.keras.layers import Dense \nfrom tensorflow.python.keras.layers import InputLayer\nfrom tensorflow.python.keras.layers import Flatten\nfrom tensorflow.python.keras.layers import LeakyReLU\n\nfrom tensorflow.python.keras._impl.keras.layers.core import Layer as _Layer\nfrom tensorflow.python.keras import backend as _K\n\n#%%\nclass layerlist(object):\n ''' Utility structure for holding layers. Layers are added to the structure\n using the add method, and then using the get_layer method will always\n return a new copy of the layer '''\n def __init__(self):\n self.layers = [ ]\n self.layers_args = [ ]\n \n def add(self, layer, **kwargs):\n self.layers.append(layer)\n self.layers_args.append(kwargs)\n \n def get_layer(self, index):\n return self.layers[index](**self.layers_args[index])\n \n @property\n def n_layers(self):\n return len(self.layers)\n\n\n#%%\nclass L2normalize(_Layer):\n def __init__(self, **kwargs):\n super(L2normalize, self).__init__(**kwargs)\n\n def build(self, input_shape):\n super(L2normalize, self).build(input_shape)\n\n def call(self, X):\n return _K.l2_normalize(X, axis=1)\n \n def compute_output_shape(self, input_shape):\n return (None, input_shape[1])\n \n#%%\nif __name__ == '__main__':\n from tensorflow.python.keras import Sequential\n s = Sequential()\n s.add(InputLayer(input_shape=(30, 30, 3)))\n s.add(Conv2D(16, 3))\n s.add(MaxPool2D((2,2)))\n s.add(Flatten())\n s.add(Dense(32))\n s.add(LeakyReLU(alpha=0.3))\n s.add(L2normalize())"
] |
[
[
"tensorflow.python.keras.layers.Flatten",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.layers.LeakyReLU",
"tensorflow.python.keras.Sequential",
"tensorflow.python.keras.layers.MaxPool2D",
"tensorflow.python.keras.layers.Conv2D",
"tensorflow.python.keras.backend.l2_normalize",
"tensorflow.python.keras.layers.InputLayer"
]
] |
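The `layerlist` helper in the Deep_LMNN row above stores layer constructors together with their keyword arguments so that `get_layer` returns a freshly built layer on every call. A self-contained sketch of that pattern, re-declaring the class here and using a plain stand-in object instead of the Keras layers so it runs without TensorFlow (the `FakeDense` name is purely illustrative, not part of the repository):

```python
class LayerList:
    """Mirror of the row's `layerlist`: store constructors plus kwargs, build fresh instances on demand."""

    def __init__(self):
        self.layers = []
        self.layers_args = []

    def add(self, layer, **kwargs):
        self.layers.append(layer)
        self.layers_args.append(kwargs)

    def get_layer(self, index):
        # Calling the stored constructor again yields a brand-new layer object each time.
        return self.layers[index](**self.layers_args[index])

    @property
    def n_layers(self):
        return len(self.layers)


class FakeDense:
    """Stand-in for a Keras Dense layer (hypothetical, only for this demo)."""

    def __init__(self, units):
        self.units = units


layers = LayerList()
layers.add(FakeDense, units=32)
a, b = layers.get_layer(0), layers.get_layer(0)
print(layers.n_layers, a.units, a is b)  # 1 32 False -> two distinct instances
```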
yardencsGitHub/tweetynet
|
[
"281f8876726359a298a2c387c7b4c2e40ac61c91",
"281f8876726359a298a2c387c7b4c2e40ac61c91"
] |
[
"article/src/scripts/Canaries/eval_without_and_with_output_transforms.py",
"article/src/scripts/Bengalese_Finches/behavior/branch_point_stats_source_data.py"
] |
[
"# This script is identical to the on for BFSongRepository but with canaries\nfrom collections import defaultdict\nimport json\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tqdm import tqdm\n\nimport vak.device\nimport vak.files\nfrom vak.labeled_timebins import lbl_tb2segments, majority_vote_transform, lbl_tb_segment_inds_list, \\\n remove_short_segments\nfrom vak import config, io, models, transforms\nfrom vak.datasets.vocal_dataset import VocalDataset\n\n\ndef compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels):\n \"\"\"helper function to compute metrics\n\n Parameters\n ----------\n metrics : dict\n where keys are metric names and values are callables that compute the metric\n given ground truth and prediction\n y_true : torch.Tensor\n vector of labeled time bins\n y_pred : torch.Tensor\n vector of labeled time bins\n y_true_labels : str\n sequence of segment labels\n y_pred_labels : str\n sequence of segment labels\n\n Returns\n -------\n metric_vals : defaultdict\n \"\"\"\n metric_vals = {}\n\n for metric_name, metric_callable in metrics.items():\n if metric_name == 'acc':\n metric_vals[metric_name] = metric_callable(y_pred, y_true)\n elif metric_name == 'levenshtein':\n metric_vals[metric_name] = metric_callable(y_pred_labels, y_true_labels)\n elif metric_name == 'segment_error_rate':\n metric_vals[metric_name] = metric_callable(y_pred_labels, y_true_labels)\n\n return metric_vals\n\n\nALPHANUMERIC = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n\n\ndef remap(labelmap):\n \"\"\"map integer labels to alphanumeric characters so we can compute edit distance metrics.\n The mapping can be arbitrary as long as it is constant across all times we compute the metric.\n \"\"\"\n return {ALPHANUMERIC[ind]: val for ind, (key, val) in enumerate(labelmap.items())}\n\n\ndef map_number_labels_to_alphanumeric(labelvec):\n \"\"\"Take a vector of 'str' labels that are all numbers and replace them with a string of characters \n \"\"\"\n return ''.join([ALPHANUMERIC[int(x)] for x in labelvec])\n\n\ndef metrics_df_from_toml_path(toml_path,\n min_segment_dur,\n device='cuda',\n spect_key='s',\n timebins_key='t'):\n \"\"\"computes evaluation metrics on a dataset from a config.toml file\n\n computes the metrics without and with transforms used for prediction\n\n Parameters\n ----------\n toml_path\n min_segment_dur\n device\n spect_key\n timebins_key\n\n Returns\n -------\n df : pandas.Dataframe\n \"\"\"\n\n toml_path = Path(toml_path)\n cfg = config.parse.from_toml(toml_path)\n # spect_standardizer = joblib.load(cfg.eval.spect_scaler_path)\n with cfg.eval.labelmap_path.open('r') as f:\n labelmap = json.load(f)\n\n model_config_map = config.models.map_from_path(toml_path, cfg.eval.models)\n\n # ---- make eval dataset that we'll use to compute metrics\n # each batch will give us dict with 'spect', 'annot' and 'spect_path'\n # we can use 'spect_path' to find prediction in pred_dict and then compare to target\n # dict also includes 'padding_mask' so we can \"unpad\" the prediction vectors\n item_transform = transforms.get_defaults('eval',\n spect_standardizer=None,\n window_size=cfg.dataloader.window_size,\n return_padding_mask=True,\n )\n\n eval_dataset = VocalDataset.from_csv(csv_path=cfg.eval.csv_path,\n split='test',\n labelmap=labelmap,\n spect_key=spect_key,\n timebins_key=timebins_key,\n item_transform=item_transform,\n )\n\n eval_data = torch.utils.data.DataLoader(dataset=eval_dataset,\n 
shuffle=False,\n # batch size 1 because each spectrogram reshaped into a batch of windows\n batch_size=1,\n num_workers=cfg.eval.num_workers)\n\n # get timebin dur to use when converting labeled timebins to labels, onsets and offsets\n timebin_dur = io.dataframe.validate_and_get_timebin_dur(\n pd.read_csv(cfg.eval.csv_path)\n )\n\n input_shape = eval_dataset.shape\n # if dataset returns spectrogram reshaped into windows,\n # throw out the window dimension; just want to tell network (channels, height, width) shape\n if len(input_shape) == 4:\n input_shape = input_shape[1:]\n\n models_map = models.from_model_config_map(\n model_config_map,\n num_classes=len(labelmap),\n input_shape=input_shape\n )\n\n if device is None:\n device = vak.device.get_default_device()\n\n records = defaultdict(list) # will be used with pandas.DataFrame.from_records to make output csv\n to_long_tensor = transforms.ToLongTensor()\n\n for model_name, model in models_map.items():\n model.load(cfg.eval.checkpoint_path)\n metrics = model.metrics # metric name -> callable map we use below in loop\n\n pred_dict = model.predict(pred_data=eval_data,\n device=device)\n\n error_position_distribution = [] # will accumulate error time differences from syllable edges\n num_err_bin = [] # will accumulate total number of error frames for normalization \n\n progress_bar = tqdm(eval_data)\n for ind, batch in enumerate(progress_bar):\n y_true, padding_mask, spect_path = batch['annot'], batch['padding_mask'], batch['spect_path']\n # need to convert spect_path to tuple for match in call to index() below\n spect_path = tuple(spect_path)\n records['spect_path'].append(spect_path[0]) # remove str from tuple\n y_true = y_true.to(device)\n y_true_np = np.squeeze(y_true.cpu().numpy())\n t_vec = vak.files.spect.load(spect_path[0])['t']\n y_true_labels, t_ons_s, t_offs_s = lbl_tb2segments(y_true_np,\n labelmap,\n t_vec)\n y_true_labels = map_number_labels_to_alphanumeric(y_true_labels)\n y_pred_ind = spect_path[0] # pred_dict['y'].index(spect_path)\n y_pred = pred_dict[y_pred_ind] # pred_dict['y_pred'][y_pred_ind]\n y_pred = torch.argmax(y_pred, dim=1) # assumes class dimension is 1\n y_pred = torch.flatten(y_pred)\n y_pred = y_pred.unsqueeze(0)[padding_mask]\n y_pred_np = np.squeeze(y_pred.cpu().numpy())\n y_pred_labels, _, _ = lbl_tb2segments(y_pred_np,\n labelmap,\n t_vec,\n min_segment_dur=None,\n majority_vote=False)\n y_pred_labels = map_number_labels_to_alphanumeric(y_pred_labels)\n\n metric_vals_batch = compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels)\n for metric_name, metric_val in metric_vals_batch.items():\n records[metric_name].append(metric_val)\n\n # --- apply majority vote and min segment dur transforms separately\n # need segment_inds_list for both transforms\n segment_inds_list = lbl_tb_segment_inds_list(y_pred_np,\n unlabeled_label=labelmap['unlabeled'])\n\n # ---- majority vote transform\n y_pred_np_mv = majority_vote_transform(y_pred_np, segment_inds_list)\n y_pred_mv = to_long_tensor(y_pred_np_mv).to(device)\n y_pred_mv_labels, _, _ = lbl_tb2segments(y_pred_np_mv,\n labelmap,\n t_vec,\n min_segment_dur=None,\n majority_vote=False)\n y_pred_mv_labels = map_number_labels_to_alphanumeric(y_pred_mv_labels)\n\n metric_vals_batch_mv = compute_metrics(metrics, y_true, y_pred_mv,\n y_true_labels, y_pred_mv_labels)\n for metric_name, metric_val in metric_vals_batch_mv.items():\n records[f'{metric_name}_majority_vote'].append(metric_val)\n\n # ---- min segment dur transform\n y_pred_np_mindur, _ = 
remove_short_segments(y_pred_np,\n segment_inds_list,\n timebin_dur=timebin_dur,\n min_segment_dur=min_segment_dur,\n unlabeled_label=labelmap['unlabeled'])\n y_pred_mindur = to_long_tensor(y_pred_np_mindur).to(device)\n y_pred_mindur_labels, _, _ = lbl_tb2segments(y_pred_np_mindur,\n labelmap,\n t_vec,\n min_segment_dur=None,\n majority_vote=False)\n y_pred_mindur_labels = map_number_labels_to_alphanumeric(y_pred_mindur_labels)\n\n metric_vals_batch_mindur = compute_metrics(metrics, y_true, y_pred_mindur,\n y_true_labels, y_pred_mindur_labels)\n for metric_name, metric_val in metric_vals_batch_mindur.items():\n records[f'{metric_name}_min_segment_dur'].append(metric_val)\n\n # ---- and finally both transforms, in same order we apply for prediction\n y_pred_np_mindur_mv, segment_inds_list = remove_short_segments(y_pred_np,\n segment_inds_list,\n timebin_dur=timebin_dur,\n min_segment_dur=min_segment_dur,\n unlabeled_label=labelmap[\n 'unlabeled'])\n y_pred_np_mindur_mv = majority_vote_transform(y_pred_np_mindur_mv,\n segment_inds_list)\n y_pred_mindur_mv = to_long_tensor(y_pred_np_mindur_mv).to(device)\n y_pred_mindur_mv_labels, _, _ = lbl_tb2segments(y_pred_np_mindur_mv,\n labelmap,\n t_vec,\n min_segment_dur=None,\n majority_vote=False)\n\n y_pred_mindur_mv_labels = map_number_labels_to_alphanumeric(y_pred_mindur_mv_labels)\n\n metric_vals_batch_mindur_mv = compute_metrics(metrics, y_true, y_pred_mindur_mv,\n y_true_labels, y_pred_mindur_mv_labels)\n for metric_name, metric_val in metric_vals_batch_mindur_mv.items():\n records[f'{metric_name}_min_dur_maj_vote'].append(metric_val)\n\n # ---- accumulate error distances from true segment edges\n num_err_bin.append(sum(y_true_np - y_pred_np_mindur_mv != 0))\n err = (y_true_np - y_pred_np_mindur_mv != 0) & ((y_true_np == 0) | (y_pred_np_mindur_mv == 0))\n error_position_distribution.append(\n [min(np.abs(np.concatenate((t_ons_s, t_offs_s)) - tm)) for tm in t_vec[err == True]])\n\n error_position_distribution = np.concatenate(error_position_distribution)\n\n df = pd.DataFrame.from_records(records)\n t1 = t_vec[1]\n return df, error_position_distribution, num_err_bin, t1\n\n\nCONFIG_ROOT = Path('src\\\\configs\\\\Canaries')\nBIRD_ID_MIN_SEGMENT_DUR_MAP = {'llb3': 0.005,\n 'llb11': 0.005,\n 'llb16': 0.005}\n\n\ndef main():\n plt.figure()\n err_stat = []\n for bird_id, min_segment_dur in BIRD_ID_MIN_SEGMENT_DUR_MAP.items():\n toml_root = CONFIG_ROOT.joinpath(bird_id)\n eval_toml_paths = sorted(toml_root.glob('**/*eval*toml'))\n\n all_dfs = []\n for eval_toml_path in eval_toml_paths:\n print(f'computing metrics from dataset in .toml file: {eval_toml_path.name}')\n toml_df, error_dist, num_err_bin, t1 = metrics_df_from_toml_path(eval_toml_path, min_segment_dur)\n all_dfs.append(toml_df)\n bins = np.histogram(error_dist, bins=np.arange(0.0, 1.0, t1))\n plt.plot(bins[0])\n err_stat.append(sum(bins[0][:2]) / sum(num_err_bin))\n\n output_df = pd.concat(all_dfs)\n output_df['bird_id'] = bird_id\n print(f\"adding 'bird_id' to concatenated data frames: {bird_id}\")\n csv_fname = f'{bird_id}.metrics.csv'\n csv_path = Path('results\\\\Canaries').joinpath(csv_fname)\n print(f'saving csv as: {csv_path}')\n output_df.to_csv(csv_path, index=False)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n# coding: utf-8\nfrom collections import defaultdict\nimport json\nfrom pathlib import Path\n\nimport crowsetta\nimport numpy as np\nimport pandas as pd\nimport pyprojroot\nimport toml\nfrom tqdm import tqdm\nimport vak\n\nimport article\n\n\n# we record ground truth and prediction for all of these\n# selected using the notebook 'branch-point-inspection.ipynb'\nBRANCH_POINTS = {\n 'bl26lb16': [('b', 'b'), ('b', 'c')],\n 'gr41rd51': [('e', 'f'), ('e', 'i')],\n 'gy6or6': [('e', 'e'), ('e', 'f')],\n 'or60yw70': [('d', 'e'), ('d', 'g')],\n}\n\n# ---- 1. get transition matrices for ground truth data\nbehav_configs_root = pyprojroot.here() / 'data/configs/Bengalese_Finches/behavior'\n\nanimal_ID_roots = sorted(\n [subdir\n for subdir in behav_configs_root.iterdir()\n if subdir.is_dir()]\n)\nanimal_day_config_map = {}\nfor animal_ID_root in animal_ID_roots:\n animal_day_config_map[animal_ID_root.name] = sorted([subdir\n for subdir in animal_ID_root.iterdir()\n if subdir.is_dir()])\n\nanimal_day_config_map = {\n animal_ID: {\n day_dir.name: sorted(day_dir.glob('*.toml'))[0] for day_dir in day_dirs\n }\n for animal_ID, day_dirs in animal_day_config_map.items()\n}\n\nprint('getting ground truth transition matrices')\nanimal_day_transmats = {}\nfor animal_id, day_config_map in tqdm(animal_day_config_map.items()):\n day_transmats = {}\n for day, config_path in day_config_map.items():\n with config_path.open('r') as fp:\n config = toml.load(fp)\n prep_csv_path = pyprojroot.here() / config['EVAL']['csv_path']\n df = pd.read_csv(prep_csv_path)\n annots = vak.annotation.from_df(df)\n day_transmats[day] = article.bfbehav.sequence.transmat_from_annots(annots, thresh=0.002)\n animal_day_transmats[animal_id] = day_transmats\n\n# ---- 2. get transition matrices from predictions made by each model\n# (including the multiple training replicates)\nbehav_results_annot_root = pyprojroot.here() / 'results/Bengalese_Finches/behavior/annotations'\n\nCLEANUP = 'min_segment_dur_majority_vote'\n\nscribe = crowsetta.Transcriber(format='csv')\n\nprint('getting transition matrices from model predictions')\nanimal_day_pred_transmats = {}\nfor animal_id, day_config_map in tqdm(animal_day_config_map.items()):\n day_transmats = {}\n for day, _ in day_config_map.items():\n annot_csvs = sorted(behav_results_annot_root.glob(f'{animal_id}*{day}*{CLEANUP}.csv'))\n model_transmat_map = {}\n for replicate_num, annot_csv in enumerate(annot_csvs):\n annots = scribe.from_file(annot_csv)\n model_transmat_map[f'replicate {replicate_num}'] = article.bfbehav.sequence.transmat_from_annots(annots, thresh=0.002)\n day_transmats[day] = model_transmat_map\n animal_day_pred_transmats[animal_id] = day_transmats\n\n\n# ---- 3. 
make DataFrame of transition probabilities\n# from ground truth and predicted annotations\nrecords = [] # to make into DataFrame\n\nfor animal_id, transitions in BRANCH_POINTS.items():\n for transition in transitions:\n days = list(animal_day_transmats[animal_id].keys())\n for day in days:\n matrix = animal_day_transmats[animal_id][day].matrix\n states = animal_day_transmats[animal_id][day].states\n row_ind, col_ind = states.index(transition[0]), states.index(transition[1])\n trans_prob = matrix[row_ind, col_ind]\n records.append(\n {\n 'animal_id': animal_id,\n 'day': day,\n 'transition': transition,\n 'prob': trans_prob,\n 'source': 'ground_truth',\n }\n )\n\n for replicate_num_str, trans_mat_tuple in animal_day_pred_transmats[animal_id][day].items():\n matrix = trans_mat_tuple.matrix\n states = trans_mat_tuple.states\n row_ind, col_ind = states.index(transition[0]), states.index(transition[1])\n trans_prob = matrix[row_ind, col_ind]\n records.append(\n {\n 'animal_id': animal_id,\n 'day': day,\n 'transition': transition,\n 'prob': trans_prob,\n 'source': 'model',\n 'replicate_num': int(replicate_num_str.split()[-1]),\n }\n )\n\nprint('saving dataframe of transition probabilities for selected branch points')\ndf = pd.DataFrame.from_records(records)\n\nRESULTS_ROOT = pyprojroot.here() / 'results' / 'Bengalese_Finches'\nsource_data_csv_path = RESULTS_ROOT / 'behavior' / 'transition-probabilities.csv'\ndf.to_csv(source_data_csv_path)\n\nanimal_xyerr = {}\nprint('computing mean / std. dev. for predicted probabilities')\nfor animal_id, transitions in BRANCH_POINTS.items():\n\n for transition in transitions:\n animal_xyerr[(animal_id, transition)] = defaultdict(list)\n\n days = list(animal_day_transmats[animal_id].keys())\n for day in days:\n matrix = animal_day_transmats[animal_id][day].matrix\n states = animal_day_transmats[animal_id][day].states\n row_ind, col_ind = states.index(transition[0]), states.index(transition[1])\n trans_prob = matrix[row_ind, col_ind]\n\n animal_xyerr[(animal_id, transition)]['x'].append(trans_prob)\n\n y_vals = []\n for replicate_num_str, trans_mat_tuple in animal_day_pred_transmats[animal_id][day].items():\n matrix = trans_mat_tuple.matrix\n states = trans_mat_tuple.states\n row_ind, col_ind = states.index(transition[0]), states.index(transition[1])\n trans_prob = matrix[row_ind, col_ind]\n y_vals.append(trans_prob)\n\n animal_xyerr[(animal_id, transition)]['y'].append(np.mean(y_vals))\n animal_xyerr[(animal_id, transition)]['yerr'].append(np.std(y_vals))\n\n# keys can't be tuples when saving to .json, convert to strings\nanimal_xyerr = {str(k): v for k, v in animal_xyerr.items()}\n\nsource_data_json_path = RESULTS_ROOT / 'behavior' / 'transition-probabilities-x-y-plot.json'\nwith source_data_json_path.open('w') as fp:\n json.dump(animal_xyerr, fp)\n\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"numpy.arange",
"torch.argmax",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"torch.flatten",
"pandas.DataFrame.from_records",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"pandas.DataFrame.from_records",
"numpy.std",
"pandas.read_csv",
"numpy.mean"
]
] |
shingkid/data-mining-suicide-sg
|
[
"fc7dc746d9a4115c9d782222eec92a7d1e5e4a04"
] |
[
"scripts/crawl_reddit.py"
] |
[
"#!/usr/bin/env python\n\nimport argparse\nimport os\n# import pprint\nimport random\nimport socket\nimport sys\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport praw\nfrom tqdm import tqdm\n\nfrom utility import create_project_dir, file_to_set, get_date\n\n\ndef receive_connection():\n \"\"\"Wait for and then return a connected socket..\n\n Opens a TCP connection on port 8080, and waits for a single client.\n\n \"\"\"\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind(('localhost', 8080))\n server.listen(1)\n client = server.accept()[0]\n server.close()\n return client\n\n\ndef send_message(client, message):\n \"\"\"Send message to client and close the connection.\"\"\"\n print(message)\n client.send('HTTP/1.1 200 OK\\r\\n\\r\\n{}'.format(message).encode('utf-8'))\n client.close()\n\n\ndef obtain_token():\n \"\"\"Provide the program's entry point when directly executed.\"\"\"\n print('Go here while logged into the account you want to create a '\n 'token for: https://www.reddit.com/prefs/apps/')\n print('Click the create an app button. Put something in the name '\n 'field and select the script radio button.')\n print('Put http://localhost:8080 in the redirect uri field and '\n 'click create app')\n client_id = input('Enter the client ID, it\\'s the line just under '\n 'Personal use script at the top: ')\n client_secret = input('Enter the client secret, it\\'s the line next '\n 'to secret: ')\n commaScopes = input('Now enter a comma separated list of scopes, or '\n 'all for all tokens: ')\n\n if commaScopes.lower() == 'all':\n scopes = ['creddits', 'edit', 'flair', 'history', 'identity',\n 'modconfig', 'modcontributors', 'modflair', 'modlog',\n 'modothers', 'modposts', 'modself', 'modwiki',\n 'mysubreddits', 'privatemessages', 'read', 'report',\n 'save', 'submit', 'subscribe', 'vote', 'wikiedit',\n 'wikiread']\n else:\n scopes = commaScopes.strip().split(',')\n\n reddit = praw.Reddit(client_id=client_id.strip(),\n client_secret=client_secret.strip(),\n redirect_uri='http://localhost:8080',\n user_agent='praw_refresh_token_example')\n state = str(random.randint(0, 65000))\n url = reddit.auth.url(scopes, state, 'permanent')\n print('Now open this url in your browser: '+url)\n sys.stdout.flush()\n\n client = receive_connection()\n data = client.recv(1024).decode('utf-8')\n param_tokens = data.split(' ', 2)[1].split('?', 1)[1].split('&')\n params = {key: value for (key, value) in [token.split('=')\n for token in param_tokens]}\n\n if state != params['state']:\n send_message(client, 'State mismatch. Expected: {} Received: {}'\n .format(state, params['state']))\n elif 'error' in params:\n send_message(client, params['error'])\n\n refresh_token = reddit.auth.authorize(params['code'])\n send_message(client, 'Refresh token: {}'.format(refresh_token))\n\n return reddit\n\n\ndef crawl_submissions(reddit, words, subreddit_name):\n \"\"\"Crawls submissions from r/Singapore\n \n Args:\n reddit (Reddit): Reddit instance after authentication\n words (set): set of words from a vocabulary file\n subreddit_name (str): name of subreddit, e.g. 
Singapore\n\n Returns:\n submissions (DataFrame): DataFrame of crawled submissions\n \"\"\"\n print(\"Crawling submissions...\")\n t0 = time.time()\n subreddit = reddit.subreddit(subreddit_name)\n submissions = pd.DataFrame(columns=['title', 'score', 'id', 'url', 'comms_num', 'created', 'body', 'author_name', 'query'])\n for word in tqdm(words):\n subreddit_query = subreddit.search(word)\n\n if subreddit_query:\n topics_dict = {\n \"title\":[],\n \"score\":[],\n \"id\":[],\n \"url\":[],\n \"comms_num\": [],\n \"created\": [],\n \"body\": [],\n \"author_name\": [],\n# \"reports_num\":[],\n \"query\": []\n }\n\n for submission in subreddit_query:\n topics_dict[\"title\"].append(submission.title)\n topics_dict[\"score\"].append(submission.score)\n topics_dict[\"id\"].append(submission.id)\n topics_dict[\"url\"].append(submission.url)\n topics_dict[\"comms_num\"].append(submission.num_comments)\n topics_dict[\"created\"].append(get_date(submission.created))\n topics_dict[\"body\"].append(submission.selftext[:-3])\n topics_dict[\"author_name\"].append(submission.author.name)\n# topics_dict[\"reports_num\"].append(submission.num_reports)\n topics_dict[\"query\"].append(word)\n\n submissions = pd.concat([submissions, pd.DataFrame(topics_dict)])\n print(\"Seconds:\", time.time()-t0)\n\n submissions.drop_duplicates('id', inplace=True)\n submissions.reset_index(inplace=True)\n submissions.drop(columns=['index'], inplace=True)\n\n print(submissions)\n sub_path = os.path.join('../data', 'submissions.csv')\n if os.path.isfile(sub_path):\n replace = input(\"File already exists. Replace %s? [y/n]\" % sub_path)\n if replace=='y':\n print(\"Saving to %s\" % sub_path)\n submissions.to_csv(sub_path, index=False)\n else:\n print(\"Saving to %s\" % sub_path)\n submissions.to_csv(sub_path, index=False)\n\n return submissions\n\n\ndef remove_irrelevant_posts():\n \"\"\"Manually screen through every submission and determine its relevance\n \n Returns:\n df (DataFrame): DataFrame of submissions with new 'relevant' column\n \"\"\"\n filename = os.path.join('../data', 'submissions.csv')\n if not os.path.isfile(filename):\n print(\"Need to crawl for submissions first.\")\n exit()\n\n df = pd.read_csv(filename)\n print(\"No. 
of submissions:\", df.shape[0])\n for index, row in df.iterrows():\n print(index, 'Query:', row.query)\n print('Title:', row.title)\n keep = input(\"Keep submission [y/n] (Enter 'more' to see body): \")\n if keep=='more':\n print(row.body)\n keep = input('Keep submission [y/n]: ')\n \n if keep=='n':\n df.at[index, 'relevant'] = True\n else:\n df.at[index, 'relevant'] = False\n \n df = df.replace(np.nan, '', regex=True)\n print(df)\n path = os.path.join('../data', 'submissions-clean.csv')\n print(\"Saving to %s\" % path)\n df.to_csv(path, index=False)\n\n return df\n\n\ndef crawl_comments(reddit):\n \"\"\"Crawl comments on submissions labeled 'relevant'\n \n Args:\n reddit (Reddit): Reddit instance after authentication\n\n Returns:\n comments (DataFrame): DataFrame of crawled comments\n \"\"\"\n filename = os.path.join('../data', 'submissions-clean.csv')\n\n if not os.path.isfile(filename):\n print(\"You have not cleaned your submissions.\")\n exit()\n\n print(\"Crawling comments...\")\n t0 = time.time()\n df = pd.read_csv(filename)\n comments = []\n for index, row in df[df.relevant].iterrows():\n submission_id = row.id\n submission = praw.models.Submission(reddit, id=submission_id)\n print(index, \"Submission:\", row.id, \"Num comments:\", submission.num_comments)\n comment_forest = submission.comments\n submission.comments.replace_more(limit=None)\n for comment in comment_forest.list():\n # pprint.pprint(vars(comment))\n redditor_name = \"\" \n if comment.author:\n redditor_name = comment.author.name\n # pprint.pprint(vars(comment.author))\n comments.append([submission_id, comment.id, comment.body, comment.score, redditor_name, get_date(comment.created), comment.parent().id])\n print(\"Seconds:\", time.time()-t0)\n\n comments = pd.DataFrame(comments, columns=['submission_id', 'id', 'body', 'score', 'author_name', 'created', 'parent'])\n\n com_path = os.path.join('../data', 'comments.csv')\n if os.path.isfile(com_path):\n replace = input(\"File already exists. Replace %s? [y/n]\" % com_path)\n if replace=='y':\n print(\"Saving to %s\" % com_path)\n comments.to_csv(com_path, index=False)\n else:\n print(\"Saving to %s\" % com_path)\n comments.to_csv(com_path, index=False)\n\n return comments\n\n\ndef main():\n prog = \"crawl_reddit\"\n descr = \"Scrape and crawl r/Singapore\"\n parser = argparse.ArgumentParser(prog=prog, description=descr)\n parser.add_argument(\"--s\", help=\"Crawl submissions\", action=\"store_true\")\n parser.add_argument(\"--l\", help=\"Label submissions\", action=\"store_true\")\n parser.add_argument(\"--c\", help=\"Crawl comments\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.s or args.c:\n reddit = obtain_token()\n\n if args.s:\n vocab_file_path = input('Enter path to vocabulary file: ')\n while not os.path.isfile(vocab_file_path):\n vocab_file_path = input('Invalid file. 
Enter path to vocabulary file: ')\n words = file_to_set(vocab_file_path)\n crawl_submissions(reddit, words, 'Singapore')\n\n if args.l:\n remove_irrelevant_posts()\n\n if args.c:\n crawl_comments(reddit)\n\n DATA_DIR = '../data'\n merged_path = os.path.join(DATA_DIR, 'merged.csv')\n sub_path = os.path.join(DATA_DIR, 'submissions-clean.csv')\n com_path = os.path.join(DATA_DIR, 'comments.csv')\n print(not os.path.isfile(merged_path), os.path.isfile(sub_path), os.path.isfile(com_path))\n if not os.path.isfile(merged_path) and os.path.isfile(sub_path) and os.path.isfile(com_path):\n submissions = pd.read_csv(sub_path)\n submissions = submissions[submissions.relevant]\n print(submissions)\n comments = pd.read_csv(com_path)\n comments.rename(columns={'body': 'content'}, inplace = True)\n print(comments)\n submissions['content'] = submissions.title.map(str) + ' ' + submissions.body.map(str)\n df = pd.concat([submissions, comments])\n df = df[['id', 'author_name', 'content']]\n print(df)\n print(\"Saving to %s\" % merged_path)\n df.to_csv(merged_path, index=False)\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
huiqun2001/caffe2
|
[
"97209961b675c8bea3831450ba46c9e8b7bad3de"
] |
[
"caffe2/python/operator_test/conv_test.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\n\nimport numpy as np\nfrom hypothesis import assume, given\nimport hypothesis.strategies as st\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import brew, core, workspace\nimport caffe2.python.hypothesis_test_util as hu\nfrom caffe2.python.model_helper import ModelHelper\n\n\ndef _cudnn_supports(\n dilation=False,\n nhwc=False,\n):\n \"\"\"Return True if cuDNN supports this configuration.\"\"\"\n v = workspace.GetCuDNNVersion()\n if dilation and v < 6000:\n # Dilation not supported until v6\n return False\n if dilation and nhwc:\n # Dilation and NHWC not supported together\n return False\n return True\n\n\nclass TestConvolution(hu.HypothesisTestCase):\n # CUDNN does NOT support different padding values and we skip it\n @given(op_type=st.sampled_from([\"Conv\", \"Conv2D\"]),\n stride_h=st.integers(1, 3),\n stride_w=st.integers(1, 3),\n pad_t=st.integers(0, 3),\n pad_l=st.integers(0, 3),\n pad_b=st.integers(0, 3),\n pad_r=st.integers(0, 3),\n kernel=st.integers(3, 5),\n size=st.integers(1, 8),\n input_channels=st.integers(1, 3),\n output_channels=st.integers(1, 3),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\", \"NHWC\"]),\n engine=st.sampled_from([\"\", \"EIGEN\"]),\n shared_buffer=st.booleans(),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_convolution_separate_stride_pad_gradients(self, op_type,\n stride_h, stride_w,\n pad_t, pad_l, pad_b,\n pad_r, kernel, size,\n input_channels,\n output_channels,\n batch_size, order,\n engine, shared_buffer,\n use_bias,\n gc, dc):\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n stride_h=stride_h,\n stride_w=stride_w,\n pad_t=pad_t,\n pad_l=pad_l,\n pad_b=pad_b,\n pad_r=pad_r,\n kernel=kernel,\n order=order,\n engine=engine,\n shared_buffer=int(shared_buffer),\n )\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n w = np.random.rand(\n output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, w, b] if use_bias else [X, w]\n\n # Error handling path.\n if size + pad_r + pad_l < kernel or size + pad_t + pad_b < kernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n # CUDNN does NOT support different padding values and we skip it\n @given(op_type=st.sampled_from([\"Conv\", \"Conv2D\"]),\n stride_h=st.integers(1, 3),\n stride_w=st.integers(1, 3),\n pad_t=st.integers(0, 3),\n pad_l=st.integers(0, 3),\n pad_b=st.integers(0, 3),\n pad_r=st.integers(0, 3),\n kernel=st.integers(1, 5),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n engine=st.sampled_from([\"\", \"EIGEN\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_convolution_separate_stride_pad_layout(self, op_type,\n stride_h, stride_w,\n pad_t, pad_l, pad_b, pad_r,\n kernel, size,\n input_channels,\n output_channels, batch_size,\n engine, use_bias, gc, dc):\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n w = np.random.rand(\n 
output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n outputs = {}\n for order in [\"NCHW\", \"NHWC\"]:\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n stride_h=stride_h,\n stride_w=stride_w,\n kernel=kernel,\n pad_t=pad_t,\n pad_l=pad_l,\n pad_b=pad_b,\n pad_r=pad_r,\n order=order,\n engine=engine,\n device_option=gc,\n )\n if order == \"NCHW\":\n X_f = X.transpose((0, 3, 1, 2))\n w_f = w.transpose((0, 3, 1, 2))\n else:\n X_f = X\n w_f = w\n self.ws.create_blob(\"X\").feed(X_f, device_option=gc)\n self.ws.create_blob(\"w\").feed(w_f, device_option=gc)\n self.ws.create_blob(\"b\").feed(b, device_option=gc)\n self.ws.run(op)\n outputs[order] = self.ws.blobs[\"Y\"].fetch()\n np.testing.assert_allclose(\n outputs[\"NCHW\"],\n outputs[\"NHWC\"].transpose((0, 3, 1, 2)),\n atol=1e-4,\n rtol=1e-4)\n\n @given(op_type=st.sampled_from([\"Conv\", \"Conv2D\"]),\n stride=st.integers(1, 3),\n pad=st.integers(0, 3),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 3),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n order=st.sampled_from([\"NCHW\", \"NHWC\"]),\n engine=st.sampled_from([\"\", \"CUDNN\", \"MKLDNN\"]),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_convolution_gradients(self, op_type, stride, pad, kernel, dilation,\n size, input_channels, output_channels,\n batch_size, order, engine, use_bias, gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n\n if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':\n assume(_cudnn_supports(dilation=(dilation > 1),\n nhwc=(order == 'NHWC')))\n\n assume(engine != \"MKLDNN\" or use_bias is True)\n\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n )\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n w = np.random.rand(\n output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n if order == \"NCHW\":\n X = X.transpose((0, 3, 1, 2))\n w = w.transpose((0, 3, 1, 2))\n\n inputs = [X, w, b] if use_bias else [X, w]\n # Error handling path.\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n def _nd_convolution_nchw(self, n, input_channels, output_channels,\n batch_size, stride, size, kernel, dilation, pad,\n use_bias, gc, dc):\n dkernel = dilation * (kernel - 1) + 1\n for op_type in [\"Conv\", \"Conv\" + str(n) + \"D\"]:\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n strides=[stride] * n,\n kernels=[kernel] * n,\n dilations=[dilation] * n,\n pads=[pad] * n * 2,\n order=\"NCHW\",\n engine=\"\",\n )\n\n input_dims = [batch_size, input_channels]\n input_dims.extend([size] * n)\n filter_dims = [output_channels, input_channels]\n filter_dims.extend([kernel] * n)\n\n X = np.random.rand(*input_dims).astype(np.float32) - 0.5\n w = np.random.rand(*filter_dims).astype(np.float32) - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n\n inputs 
= [X, w, b] if use_bias else [X, w]\n\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(input_channels=st.integers(1, 3),\n output_channels=st.integers(1, 2),\n batch_size=st.integers(1, 3),\n stride=st.integers(1, 3),\n size=st.integers(7, 10),\n kernel=st.integers(1, 2),\n dilation=st.integers(1, 3),\n pad=st.integers(0, 3),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_1d_convolution_nchw(self, input_channels, output_channels,\n batch_size, stride, size, kernel, dilation,\n pad, use_bias, gc, dc):\n self._nd_convolution_nchw(\n 1, input_channels, output_channels, batch_size, stride, size,\n kernel, dilation, pad, use_bias, gc, dc\n )\n\n @given(input_channels=st.integers(1, 2),\n output_channels=st.integers(1, 2),\n batch_size=st.integers(1, 2),\n stride=st.integers(1, 2),\n size=st.integers(4, 5),\n kernel=st.integers(1, 2),\n dilation=st.integers(1, 2),\n pad=st.integers(0, 2),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_3d_convolution_nchw(self, input_channels, output_channels,\n batch_size, stride, size, kernel, dilation,\n pad, use_bias, gc, dc):\n self._nd_convolution_nchw(\n 3, input_channels, output_channels, batch_size, stride, size,\n kernel, dilation, pad, use_bias, gc, dc\n )\n\n @given(op_type=st.sampled_from([\"Conv\", \"Conv3D\"]),\n batch_size=st.integers(1, 2),\n stride=st.integers(1, 2),\n size=st.integers(3, 5),\n kernel=st.integers(1, 2),\n dilation=st.integers(1, 2),\n pad=st.integers(0, 2),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_3d_convolution_cudnn_nchw(self, op_type, batch_size, stride, size,\n kernel, dilation, pad, use_bias, gc, dc):\n input_channels = 1\n output_channels = 1\n n = 3\n dkernel = dilation * (kernel - 1) + 1\n order = \"NCHW\"\n\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n strides=[stride] * n,\n kernels=[kernel] * n,\n dilations=[dilation] * n,\n pads=[pad] * n * 2,\n order=order,\n engine=\"CUDNN\",\n )\n\n input_dims = [batch_size, input_channels]\n input_dims.extend([size] * n)\n filter_dims = [output_channels, input_channels]\n filter_dims.extend([kernel] * n)\n X = np.random.rand(*input_dims).astype(np.float32) - 0.5\n w = np.random.rand(*filter_dims).astype(np.float32) - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n\n inputs = [X, w, b] if use_bias else [X, w]\n\n if size + pad + pad < dkernel or size + pad + pad < dkernel:\n with self.assertRaises(RuntimeError):\n self.assertDeviceChecks(dc, op, inputs, [0])\n return\n\n self.assertDeviceChecks(dc, op, inputs, [0])\n for i in range(len(inputs)):\n self.assertGradientChecks(gc, op, inputs, i, [0])\n\n @given(op_type=st.sampled_from([\"Conv\", \"Conv2D\"]),\n stride=st.integers(1, 3),\n pad=st.integers(0, 3),\n kernel=st.integers(1, 5),\n dilation=st.integers(1, 3),\n size=st.integers(7, 10),\n input_channels=st.integers(1, 8),\n output_channels=st.integers(1, 8),\n batch_size=st.integers(1, 3),\n use_bias=st.booleans(),\n **hu.gcs)\n def test_convolution_layout(self, op_type, stride, pad, kernel, dilation,\n size, input_channels, output_channels,\n batch_size, use_bias, gc, dc):\n assume(size >= dilation * (kernel - 1) + 1)\n\n X = np.random.rand(\n batch_size, size, size, input_channels).astype(np.float32) - 0.5\n w = np.random.rand(\n 
output_channels, kernel, kernel, input_channels).astype(np.float32)\\\n - 0.5\n b = np.random.rand(output_channels).astype(np.float32) - 0.5\n Output = collections.namedtuple(\"Output\", [\"Y\", \"engine\", \"order\"])\n outputs = []\n\n for order in [\"NCHW\", \"NHWC\"]:\n engine_list = ['']\n if _cudnn_supports(dilation=(dilation > 1), nhwc=(order == 'NHWC')):\n engine_list.append('CUDNN')\n\n for engine in engine_list:\n op = core.CreateOperator(\n op_type,\n [\"X\", \"w\", \"b\"] if use_bias else [\"X\", \"w\"],\n [\"Y\"],\n stride=stride,\n kernel=kernel,\n dilation=dilation,\n pad=pad,\n order=order,\n engine=engine,\n device_option=gc,\n )\n if order == \"NCHW\":\n X_f = X.transpose((0, 3, 1, 2))\n w_f = w.transpose((0, 3, 1, 2))\n else:\n X_f = X\n w_f = w\n self.assertDeviceChecks(\n dc,\n op,\n [X_f, w_f, b] if use_bias else [X_f, w_f],\n [0])\n self.ws.create_blob(\"X\").feed(X_f, device_option=gc)\n self.ws.create_blob(\"w\").feed(w_f, device_option=gc)\n self.ws.create_blob(\"b\").feed(b, device_option=gc)\n self.ws.run(op)\n outputs.append(Output(\n Y=self.ws.blobs[\"Y\"].fetch(), engine=engine, order=order))\n\n def canonical(o):\n if o.order == \"NHWC\":\n return o.Y.transpose((0, 3, 1, 2))\n else:\n return o.Y\n\n for o in outputs:\n np.testing.assert_allclose(\n canonical(outputs[0]),\n canonical(o),\n atol=1e-4,\n rtol=1e-4)\n\n @given(num_workers=st.integers(1, 4),\n net_type=st.sampled_from(\n [\"simple\", \"dag\"] +\n ([\"async_dag\"] if workspace.has_gpu_support else [])),\n do=st.sampled_from(hu.device_options),\n engine=st.sampled_from([\"CUDNN\", \"\"]))\n def test_convolution_sync(self, net_type, num_workers, do, engine):\n m = ModelHelper(name=\"test_model\")\n n = 1\n d = 2\n depth = 3\n iters = 5\n h = 5\n w = 5\n workspace.ResetWorkspace()\n\n use_cudnn = (engine == 'CUDNN')\n\n np.random.seed(1701)\n # Build a binary tree of conv layers, summing at each node.\n for i in reversed(range(depth)):\n for j in range(2 ** i):\n bottom_1 = \"{}_{}\".format(i + 1, 2 * j)\n bottom_2 = \"{}_{}\".format(i + 1, 2 * j + 1)\n mid_1 = \"{}_{}_m\".format(i + 1, 2 * j)\n mid_2 = \"{}_{}_m\".format(i + 1, 2 * j + 1)\n top = \"{}_{}\".format(i, j)\n w1, b1, w2, b2 = np.random.randn(4).tolist()\n brew.conv(\n m, bottom_1, mid_1,\n dim_in=d, dim_out=d,\n kernel=3,\n weight_init=('ConstantFill', dict(value=w1)),\n bias_init=('ConstantFill', dict(value=b1)),\n cudnn_state=np.random.randint(0, 3),\n stride=1,\n pad=1,\n deterministic=1,\n use_cudnn=use_cudnn,\n engine=engine)\n brew.conv(\n m, bottom_2, mid_2,\n dim_in=d, dim_out=d,\n kernel=3,\n stride=1,\n pad=1,\n weight_init=('ConstantFill', dict(value=w2)),\n bias_init=('ConstantFill', dict(value=b2)),\n deterministic=1,\n cudnn_state=np.random.randint(0, 3),\n use_cudnn=use_cudnn,\n engine=engine)\n m.net.Sum([mid_1, mid_2], top)\n\n m.net.Flatten([\"0_0\"], [\"0_0_flat\"])\n m.net.SquaredL2Distance([\"0_0_flat\", \"label\"], \"xent\")\n m.net.AveragedLoss(\"xent\", \"loss\")\n input_to_grad = m.AddGradientOperators([\"loss\"])\n m.Proto().device_option.CopyFrom(do)\n m.param_init_net.Proto().device_option.CopyFrom(do)\n m.Proto().type = net_type\n m.Proto().num_workers = num_workers\n self.ws.run(m.param_init_net)\n\n def run():\n import numpy as np\n np.random.seed(1701)\n input_blobs = [\"{}_{}\".format(depth, j) for j in range(2 ** depth)]\n for input_blob in input_blobs:\n self.ws.create_blob(input_blob).feed(\n np.random.randn(n, d, h, w).astype(np.float32),\n device_option=do)\n self.ws.create_blob(\"label\").feed(\n 
np.random.randn(n, d * h * w).astype(np.float32),\n device_option=do)\n self.ws.run(m.net)\n gradients = [\n self.ws.blobs[str(input_to_grad[input_blob])].fetch()\n for input_blob in input_blobs]\n return gradients\n\n outputs = [run() for _ in range(iters)]\n for output in outputs[1:]:\n np.testing.assert_array_equal(outputs[0], output)\n np.testing.assert_allclose(\n np.sum(np.square(output)),\n 1763719461732352.0,\n rtol=1e-5)\n\n def test_use_cudnn_engine_interactions(self):\n \"\"\"Make sure the use_cudnn and engine kwargs work as expected.\"\"\"\n for model_default in [None, True, False]:\n arg_scope = {}\n if model_default is not None:\n arg_scope['use_cudnn'] = model_default\n else:\n model_default = True # the default\n\n model = ModelHelper(arg_scope=arg_scope)\n self.assertEqual(model.arg_scope['use_cudnn'], model_default)\n f = functools.partial(brew.conv, model,\n 'conv_in', 'conv_out', 10, 10, 5)\n\n for op_cudnn in [None, True, False]:\n for op_engine in [None, '', 'CUDNN']:\n kwargs = {}\n if op_cudnn is not None:\n kwargs['use_cudnn'] = op_cudnn\n else:\n op_cudnn = False # the default\n if op_engine is not None:\n kwargs['engine'] = op_engine\n\n calculated_cudnn = kwargs.get('use_cudnn', model_default)\n expected_engine = kwargs.get(\n 'engine',\n 'CUDNN' if calculated_cudnn else '')\n\n if ((calculated_cudnn is True and op_engine == '') or\n (calculated_cudnn is False and op_engine == 'CUDNN')):\n with self.assertRaises(ValueError):\n f(**kwargs)\n else:\n f(**kwargs)\n self.assertEqual(model.Proto().op[-1].engine,\n expected_engine)\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n"
] |
[
[
"numpy.square",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.random.randn",
"numpy.random.rand",
"numpy.random.randint"
]
] |
shazz/tflearn-experiments
|
[
"9ed08dbc06c31cb3921b16f3732acad2341e8d0e"
] |
[
"cdc/data_importer.py"
] |
[
"#\n# CDC BRFSS Extract data importer\n# Load a TSV extract from the BRFSS 2015 Annual Survey\n# https://www.cdc.gov/brfss/annual_data/annual_2015.html\n#\n# Then convert to metric system and clean it\n# Good tutorial on pandas cleaning: https://github.com/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb\n# Those nice animals need a good shower\n\n# (C) 2016 - Shazz \n# Under MIT license\n\nimport pandas as pd\nimport numpy as np\nimport time\n\n# I did not fully understand the warning so I hide it.... \"ostrich policy\"\npd.options.mode.chained_assignment = None # default='warn'\ndata = pd.Series()\nisloaded = False\n\ndef load_data(row_nb):\n # use global variables to avoid recleaning if called again. Better way ?\n global data\n global isloaded\n \n start_time = time.time()\n if isloaded == False:\n \n na_values = [' ']\n data = pd.read_csv(\"data/2015_BRFSS_extract.tsv\", sep='\\t', header=0, na_values=na_values, dtype={'HEIG': str}) \n print(\"data loaded:\", data.shape)\n \n # convert to metric system (cm and kg)\n data['HEIG'] = ((pd.to_numeric(data['HEIG'].str.slice(0, 2))*30.48) + (pd.to_numeric(data['HEIG'].str.slice(2, 4))*2.54)).round(0)\n data['WEIG'] = (data['WEIG']/2.20462262185).round(0)\n \n # remove non sense heights\n overm_heights = data['HEIG'] >= 300\n overl_heights = data['HEIG'] == 0\n data['HEIG'][overm_heights] = np.nan\n data['HEIG'][overl_heights] = np.nan\n \n # remove non sense weights\n overm_weights = data['WEIG'] >= 400\n overl_weights = data['WEIG'] == 0\n data['WEIG'][overm_weights] = np.nan\n data['WEIG'][overl_weights] = np.nan\n\n # add a class for each sex\n female = data['S'] == 2\n male = data['S'] == 1\n \n data['S'][female] = 0\n data['S'][male] = 1\n\n # discard N/A values\n data = data.dropna()\n \n # generate labels with 2 classes\n t = np.asarray(data['S'], dtype='int32')\n print(\"generate labels: \", len(data['S']))\n labels = np.zeros((len(t), 2))\n for i in range(len(t)):\n labels[i, t[i]] = 1.\n\n process_time = time.time()\n print(\"data cleaned:\", data.shape, \"in \", process_time - start_time, \"s\")\n isloaded = True\n else:\n print(\"data already cleaned\")\n \n if row_nb > 0:\n print(\"Genre values:\", data['S'].unique())\n print(\"Height values:\", data['HEIG'].unique())\n print(\"Weight values:\", data['WEIG'].unique()) \n subset = data.iloc[:row_nb, :]\n Y = labels[:row_nb, :]\n else:\n subset = data\n Y = labels\n \n X = subset[[\"WEIG\",\"HEIG\"]] \n \n print(\"data loaded in \", time.time() - start_time, \"s\")\n \n return np.array(X), np.array(Y)\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"pandas.read_csv",
"pandas.Series"
]
] |
AodhanSweeney/TLS_diurnal_climatology
|
[
"c23dc2c49c91b37189077a0791f92bb09bd027d4"
] |
[
"era5_tools.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport dask.dataframe as dd\n\ndef to_bin_hour(h):\n hour_step = 3\n binned_hour =int((np.floor(h / hour_step) * hour_step)/hour_step)\n return(binned_hour)\n\ndef daily_mean_remover(era_5_df):\n \"\"\"\n Description\n ===========\n Find the diurnal mean of the ERA-5 TLS temperatures in each 5x10 degree box and remove it\n \n Arguments\n =========\n \n era_5_df\n --------\n Pandas dataframe of ERA-5 TLS temperatures.\n \n returns\n =======\n daily_mean_removed_df\n ----------------\n Pandas dataframe of ERA-5 TLS temperatures after daily mean has been removed\n \"\"\"\n daily_mean_removed = []\n for year in range(2006, 2021):\n print(year)\n era_5_df_year = era_5_df[era_5_df['Year'] == str(year)]\n for day in era_5_df_year.Day.unique():\n era_5_day = era_5_df_year[era_5_df_year['Day'] == day]\n for lat in np.arange(-90, 90, 5):\n era_5_lat = era_5_day[era_5_day['Lat'] == lat]\n for lon in np.arange(-180, 180, 10):\n era_5_lon = era_5_lat[era_5_lat['Lon'] == lon]\n era_5_lon.Temp = era_5_lon.Temp - era_5_lon.Temp.mean()\n daily_mean_removed.append(era_5_lon)\n daily_mean_removed_df = pd.concat(daily_mean_removed)\n return (daily_mean_removed_df)\n\n\ndef df_organizer(daily_mean_removed_df):\n \"\"\"\n Description\n ===========\n Sorts dataframe so that chunking logic is consistent with where means will be drawn. Chunks are \n created by latitude band.\n \n Arguments\n =========\n \n daily_mean_removed_df\n --------\n Pandas dataframe of ERA-5 TLS temperatures after the daily mean has been removed.\n \n returns\n =======\n daily_mean_removed_dd\n ----------------\n Dask dataframe of ERA-5 TLS temperatures after daily mean has been removed\n \"\"\"\n daily_mean_removed_df['Day'] = daily_mean_removed_df['Day'].astype(int)\n daily_mean_removed_df['Hour'] = daily_mean_removed_df['Hour'].astype(int)\n daily_mean_removed_df['Year'] = daily_mean_removed_df['Year'].astype(int)\n daily_mean_removed_df['Lat'] = daily_mean_removed_df['Lat'].astype(int)\n daily_mean_removed_df['Lon'] = daily_mean_removed_df['Lon'].astype(int)\n daily_mean_removed_df['Temp'] = daily_mean_removed_df['Temp'].astype(float)\n \n # Sort data frame by lat so chunks will align correctly\n sorted_by_lat_df = daily_mean_removed_df.sort_values(by='Lat')\n daily_mean_removed_df = sorted_by_lat_df.reset_index(drop=True)\n daily_mean_removed_dd = dd.from_pandas(daily_mean_removed_df, npartitions=36, sort=False)\n return(daily_mean_removed_dd)\n\ndef diurnal_binner(season_of_cosmic_data):\n \"\"\"\n Description\n ===========\n A variation of the gpsro_tools.diurnal_binner() function. 
This version uses dask to compute diurnal cycles because the ERA-5\n binning can be tedious because of the large amount of data\n \n Arguments\n =========\n \n season_of_cosmic_data\n --------\n Dask dataframe of ERA-5 TLS temperatures after the daily mean has been removed.\n \n returns\n =======\n cycles_in_boxes_over_globe\n ----------------\n List where diurnal cycles are binned into 36 latitude and 36 longitude bins.\n \"\"\"\n cycles_in_boxes_over_globe = []\n for lat in np.arange(-90,90,5):\n cycle_in_boxes_at_lat = []\n season_of_data_at_lat = season_of_cosmic_data.loc[season_of_cosmic_data['Lat'] == lat]\n \n for lon in np.arange(-180, 180, 10):\n season_of_data_in_box = season_of_data_at_lat.loc[season_of_data_at_lat['Lon'] == lon]\n cycle_in_box = []\n \n for hour_idx in np.arange(0,8,1):\n season_of_data_in_hourbin = season_of_data_in_box.loc[season_of_data_in_box['hourbin'] == hour_idx]\n mean_of_hourbin = season_of_data_in_hourbin.Temp.mean()\n mean_of_hourbin = mean_of_hourbin.compute()\n cycle_in_box.append(mean_of_hourbin)\n \n cycle_in_boxes_at_lat.append(cycle_in_box)\n\n cycles_in_boxes_over_globe.append(cycle_in_boxes_at_lat)\n print('Lat done: ', lat)\n \n return(cycles_in_boxes_over_globe)\n"
] |
[
[
"numpy.arange",
"pandas.concat",
"numpy.floor"
]
] |
pygeo/sense
|
[
"4610fe5247cfba04124a30a7d3db33ea1feb8c80",
"4610fe5247cfba04124a30a7d3db33ea1feb8c80"
] |
[
"sense/surface/i2em.py",
"doc/fig10-11.py"
] |
[
"\"\"\"\nimplements the I2EM model (see Ulaby (2014), Chapter 10\nbackscattering model for single scale random surfaces\n\n\nThe code originates from ideas obtained from the supplement\nof Ulaby et al (2014)\n\"\"\"\nfrom . scatter import SurfaceScatter\nimport numpy as np\n\nfrom .. util import f2lam\nfrom .. core import Reflectivity\nimport math\n\nfrom scipy.integrate import dblquad\n\n\nfrom numba import jit\n\n\n@jit(cache=True,nopython=True)\ndef _calc_roughness_spectra_matrix(nx, ny, kl2, nspec, s, acf_type_id):\n \"\"\"\n calculate roughness spectra\n needs to return a matrix for further use\n in crosspol calculations\n \"\"\"\n\n if acf_type_id == 1: # gauss\n wm = _calc_wm_matrix_gauss(nx, ny, nspec, kl2, s)\n wn = _calc_wn_matrix_gauss(nx, ny, nspec, kl2, s)\n\n elif acf_type_id == 2: # exp\n wm = _calc_wm_matrix_exp(nx, ny, nspec, kl2, s)\n wn = _calc_wn_matrix_exp(nx, ny, nspec, kl2, s)\n else:\n assert False\n return wn, wm\n\n\n\n\n\nclass I2EM(SurfaceScatter):\n def __init__(self, f, eps, sig, l, theta, **kwargs):\n \"\"\"\n\n BACKSCATTERING MODEL\n\n Parameters\n ----------\n f : float\n frequency [GHz]\n eps : complex\n relative dielectric permitivity\n sig : float\n vertical surface roughness [m]\n l : float\n autocorrelation length [m]\n theta : float\n incidence angle [rad]\n acf_type : str\n type of autocorrelation function\n 'gauss' : gaussian type ACF\n auto : bool\n specify if number of spectral components should be automatically\n determined for cross-pol calculations\n if False, then nspec=15\n xpol : bool\n perform cross-pol calculations if possible\n might be slow in case of I2EM usage\n \"\"\"\n\n self.freq = f\n lam = f2lam(self.freq)\n k = 2.*np.pi/lam\n self.k = k\n self.sig = sig\n self.ks = self.k*self.sig\n self.l = l\n self._kl2 = (self.k*self.l)**2.\n self.acf_type = kwargs.get('acf_type', 'gauss')\n super(I2EM, self).__init__(eps, k*sig, theta, kl=k*l)\n \n # assume backscatter geometry\n self.phi = 0.\n self.thetas = self.theta*1.\n self.phis = np.deg2rad(180.)\n self.mode = 'backscatter'\n\n self.auto = kwargs.get('auto', True)\n self.xpol = kwargs.get('xpol', True)\n\n # do initializations for backscatter calculations\n self._init_hlp()\n self.init_model()\n\n # calculate the actual backscattering coefficients\n self._calc_sigma_backscatter()\n\n def init_model(self):\n \"\"\"\n initialize model for calculations\n \"\"\"\n self.niter = self._estimate_itterations()\n\n # determine number of spectral components for cross-pol calculations\n if self.auto:\n # same as function _estimate_itterations, but with slightly different config\n nspec = 0\n error = 1.E8\n while error > 1.0E-8:\n nspec += 1\n error = (self._ks2*(2.*self._cs)**2.)**nspec / math.factorial(nspec) \n self.n_spec = nspec\n else:\n self.n_spec = 15\n\n I = np.arange(self.n_spec)\n self._fac = map(math.factorial, I+1) # factorial(n)\n\n\n\n def _estimate_itterations(self):\n \"\"\"\n estimate the number of necessary itterations for \n the integral calculations\n \"\"\"\n\n err = 1.E8\n Ts = 1\n while err > 1.0e-8:\n Ts += 1\n err = ((self._ks2 *(self._cs + self._css)**2 )**Ts) / math.factorial(Ts)\n return Ts\n\n\n def _init_hlp(self):\n \"\"\" initiate help variables \"\"\"\n self._ks2 = self.ks**2.\n self._cs = np.cos(self.theta)\n self._cs2 = self._cs**2.\n self._s = np.sin(self.theta)\n self._sf = np.sin(self.phi)\n self._cf = np.cos(self.phi)\n self._ss = np.sin(self.thetas)\n self._css = np.cos(self.thetas)\n self._cfs = np.cos(self.phis)\n self._sfs = np.sin(self.phis)\n self._s2 
= self._s**2.\n self._kx = self.k*self._s*self._cf\n self._ky = self.k*self._s*self._sf\n self._kz = self.k*self._cs\n\n self._ksx = self.k * self._ss *self._cfs\n self._ksy = self.k * self._ss *self._sfs\n self._ksz = self.k * self._css\n\n def _calc_sigma_backscatter(self):\n assert isinstance(self.theta, float), 'Currently array processing not supported yet!'\n # calculate backscattering coefficients\n self.vv, self.hh = self._i2em_bistatic()\n if self.xpol:\n self.hv = self._i2em_cross()\n\n def _i2em_bistatic(self):\n \"\"\"\n calculate sigma for the co-pol case\n backscatter geometr\n calculate sigma for the co-pol case\n backscatter geometry\n\n module 10.1\n \"\"\"\n\n # calculate the integral\n idx = np.arange(self.niter)+1\n self.fac = map(math.factorial, idx) # factorial for all N itterations; this is stored as it is needed multipole times\n\n self.wn, self.rss = self.calc_roughness_spectrum(acf_type=self.acf_type) \n Ivv, Ihh = self._calc_Ipp()\n Ivv_abs = np.abs(Ivv)\n Ihh_abs = np.abs(Ihh)\n\n # calculate shadowing effects\n ShdwS = self._calc_shadowing()\n\n a0 = self.wn / self.fac * (self.sig**(2.*idx))\n\n # final backscatter calculation\n hlp = ShdwS*0.5*self.k**2*np.exp(-self.sig**2*(self._kz**2.+self._ksz**2.))\n sigvv = np.sum(a0 * Ivv_abs**2.) * hlp\n sighh = np.sum(a0 * Ihh_abs**2.) * hlp\n return sigvv, sighh\n\n def _i2em_cross(self):\n rt = np.sqrt(self.eps - self._s2)\n rv = (self.eps*self._cs -rt) / (self.eps*self._cs + rt)\n rh = (self._cs - rt)/(self._cs + rt)\n rvh = (rv-rh)/2.\n\n Shdw = self._calc_shadow_cross()\n\n svh = self._integrate_xpol(rvh)\n return svh*Shdw\n\n\n def _integrate_xpol(self, rvh):\n \"\"\"\n integrate for X-pol\n dblquad(@(r,phi)xpol_integralfunc(r, phi, sp,xx, ks2, cs,s, kl2, L, er, rss, rvh, n_spec), 0.1, 1, 0, pi)\n\n the original matlab routines integrates\n xpol_integral(r,phi)\n rmin=0.1, rmax=1.\n phimin=0.,phimax=1.\n\n when using python, x and y are reversed, however\n this does not matter unless the bounds are specified in the right order\n \"\"\"\n ans, err = dblquad(self._xpol_integralfunc, 0.1, 1., lambda x : 0., lambda x : 1., args=[[rvh,self.eps, self._ks2, self._cs2, self.rss, self._cs, self._fac, self._kl2, self._s, self._get_acf_id()]])\n return ans\n\n\n\n def _get_acf_id(self):\n if self.acf_type == 'gauss':\n return 1\n if self.acf_type == 'exp15':\n return 2\n assert False, 'Unknown ACF type'\n\n\n @jit(cache=True)\n def _xpol_integralfunc(self, r, phi, *args):\n \"\"\"\n while the original matlab function\n returns a vector, this function\n returns a scalar, as the dblquad function\n in python requires so\n \"\"\"\n\n rvh = args[0][0]\n eps = args[0][1]\n ks2 = args[0][2]\n cs2 = args[0][3]\n rss = args[0][4]\n cs = args[0][5]\n fac = args[0][6]\n nspec = len(fac)\n kl2 = args[0][7]\n s = args[0][8]\n acf_type_id = args[0][9]\n\n r2 = r**2.\n sf = np.sin(phi)\n csf = np.cos(phi)\n rx = r * csf\n ry = r * sf\n\n rp = 1. + rvh\n rm = 1. - rvh\n\n q = np.sqrt(1.0001 - r2)\n qt = np.sqrt(eps - r2)\n\n a = rp / q\n b = rm / q\n c = rp / qt\n d = rm / qt\n\n # calculate cross-pol coefficient\n B3 = rx * ry / cs\n fvh1 = (b-c)*(1.- 3.*rvh) - (b - c/eps) * rp\n fvh2 = (a-d)*(1.+ 3.*rvh) - (a - d*eps) * rm\n Fvh = ( np.abs( (fvh1 + fvh2) *B3))**2.\n\n # calculate x-pol shadowing\n au = q /r /1.414 /rss\n fsh = (0.2821/au) *np.exp(-au**2.) -0.5 *(1.- math.erf(au))\n sha = 1./(1. 
+ fsh)\n\n # calculate expressions for the surface spectra\n \n wn, wm = _calc_roughness_spectra_matrix(rx, ry, kl2, nspec, s, acf_type_id) \n\n vhmnsum = 0.\n for i in xrange(nspec):\n for j in xrange(nspec):\n vhmnsum += wn[i] * wm[j] * (ks2*cs2)**((i+1)+(j+1))/fac[i]/fac[j] \n\n # compute VH scattering coefficient\n acc = np.exp(-2.* ks2 *cs2) /(16. * np.pi)\n VH = 4. * acc * Fvh * vhmnsum * r\n y = VH * sha\n return y\n\n\n\n\n def _calc_shadow_cross(self):\n \"\"\"\"\n calculating shadow consideration in single scat (Smith, 1967)\n \"\"\"\n ct = np.cos(self.theta)/np.sin(self.theta)\n farg = ct /np.sqrt(2.) /self.rss\n gamma = 0.5 *(np.exp(-farg**2.) / 1.772 / farg - math.erfc(farg))\n return 1. / (1. + gamma)\n\n def _calc_shadowing(self):\n\n if self.mode == 'backscatter': #todo comparison with binary variable instead of string to be faster ??\n\n ct = np.cos(self.theta)/np.sin(self.theta)\n cts = np.cos(self.thetas)/np.sin(self.thetas)\n rslp = self.rss\n ctorslp = ct / math.sqrt(2.) /rslp\n ctsorslp = cts / np.sqrt(2.) /rslp\n shadf = 0.5 *(np.exp(-ctorslp**2.) / np.sqrt(np.pi)/ctorslp - math.erfc(ctorslp))\n shadfs = 0.5 *(np.exp(-ctsorslp**2.) / np.sqrt(np.pi)/ctsorslp - math.erfc(ctsorslp))\n ShdwS = 1./(1. + shadf + shadfs)\n else:\n ShdwS = 1.\n\n return ShdwS\n\n def calc_roughness_spectrum(self, acf_type=None):\n \"\"\"\n calculate roughness spectrum\n Return wn as an array\n \"\"\"\n assert 'Validate with code again'\n if acf_type == 'gauss':\n # gaussian autocorrelation function\n S = GaussianSpectrum(niter=self.niter, l=self.l, theta=self.theta, thetas=self.thetas, phi=self.phi,phis=self.phis, freq=self.freq, sig=self.sig)\n elif acf_type == 'exp15':\n # 1.5 power exponential function\n S = ExponentialSpectrum(niter=self.niter, l=self.l, theta=self.theta, thetas=self.thetas, phi=self.phi,phis=self.phis, freq=self.freq, sig=self.sig)\n else:\n assert False, 'Invalid surface roughness spectrum: ' + str(acf_type)\n \n return S.wn() # returns wn as an array with length NITER\n\n def _calc_Ipp(self):\n n = np.arange(self.niter)+1.\n qi = self.k*self._cs\n qs = self.k*self._css\n\n h1= np.exp(-self.sig**2. * self._kz * self._ksz)*(self._kz + self._ksz)**n\n\n # Calculate the Fppup(dn) i(s) coefficient\n R = Reflectivity(self.eps, self.theta)\n Rvi = R.rho_v\n Rhi = R.rho_h\n \n Fvvupi, Fhhupi = self.Fppupdn( 1,1,Rvi,Rhi)\n Fvvups, Fhhups = self.Fppupdn( 1,2,Rvi,Rhi)\n Fvvdni, Fhhdni = self.Fppupdn(-1,1,Rvi,Rhi)\n Fvvdns, Fhhdns = self.Fppupdn(-1,2,Rvi,Rhi)\n\n # fpp calculations\n fvv, fhh = self.calc_fpp(Rvi, Rhi)\n\n # Ipp\n Ivv = fvv*h1\n Ivv += 0.25*(Fvvupi *(self._ksz-qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. - qi*(self._ksz-self._kz))))\n Ivv += 0.25*(Fvvdni *(self._ksz+qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. + qi*(self._ksz-self._kz))))\n Ivv += 0.25*(Fvvups *(self._kz +qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. - qs*(self._ksz-self._kz))))\n Ivv += 0.25*(Fvvdns *(self._kz -qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. + qs*(self._ksz-self._kz))))\n\n Ihh = fhh*h1\n Ihh += 0.25*(Fhhupi *(self._ksz-qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. - qi*(self._ksz-self._kz))))\n Ihh += 0.25*(Fhhdni *(self._ksz+qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. + qi*(self._ksz-self._kz))))\n Ihh += 0.25*(Fhhups *(self._kz +qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. - qs*(self._ksz-self._kz))))\n Ihh += 0.25*(Fhhdns *(self._kz -qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. 
+ qs*(self._ksz-self._kz))))\n\n return Ivv, Ihh\n\n def calc_fpp(self, Rvi, Rhi):\n\n Rvt, Rht = self.calc_reflection_coefficients(Rvi, Rhi)\n\n fvv = 2. * Rvt *(self._s * self._ss - (1. + self._cs * self._css) * self._cfs)/(self._cs + self._css)\n fhh = -2. * Rht *(self._s * self._ss - (1. + self._cs * self._css) * self._cfs)/(self._cs + self._css)\n return fvv, fhh\n\n\n def Fppupdn(self, u_d, i_s, Rvi, Rhi):\n assert i_s in [1,2]\n assert u_d in [-1,1]\n\n # set coefficients\n if i_s == 1:\n Gqi = u_d * self._kz\n Gqti = u_d * self.k *np.sqrt(self.eps-self._s**2.);\n qi = u_d * self._kz\n c11 = self.k * self._cfs *(self._ksz - qi)\n c21 = self._cs *(self._cfs *(self.k**2 *self._s*self._cf*(self._ss *self._cfs - self._s * self._cf) + Gqi*(self.k * self._css - qi))+ self.k**2. *self._cf * self._s *self._ss *self._sfs**2.)\n\n c31 = self.k*self._s*(self._s*self._cf*self._cfs*(self.k*self._css-qi) - Gqi*(self._cfs*(self._ss*self._cfs -self._s*self._cf)+ self._ss *self._sfs**2.))\n c41 = self.k *self._cs*(self._cfs*self._css*(self.k*self._css - qi) + self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))\n c51 = Gqi*(self._cfs *self._css*(qi-self.k*self._css) - self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))\n c12 = self.k * self._cfs *(self._ksz - qi)\n\n c22 = self._cs *(self._cfs *(self.k**2. *self._s*self._cf*(self._ss *self._cfs - self._s * self._cf) + Gqti*(self.k * self._css - qi)) + self.k**2. *self._cf * self._s *self._ss *self._sfs**2.)\n c32 = self.k*self._s*(self._s*self._cf*self._cfs*(self.k*self._css-qi) - Gqti*(self._cfs*(self._ss*self._cfs -self._s*self._cf)- self._ss *self._sfs**2.))\n\n c42 = self.k *self._cs*(self._cfs*self._css*(self.k*self._css - qi) + self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))\n\n c52 = Gqti*(self._cfs *self._css*(qi-self.k*self._css) - self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))\n\n else:\n Gqs = u_d * self._ksz\n Gqts = u_d *self.k *np.sqrt(self.eps-self._ss**2.)\n qs = u_d * self._ksz\n\n c11 = self.k * self._cfs *(self._kz + qs)\n c21 = Gqs *(self._cfs*(self._cs*(self.k*self._cs+qs)-self.k*self._s*(self._ss *self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)\n c31 = self.k *self._ss*(self.k*self._cs*(self._ss*self._cfs - self._s*self._cf)+ self._s*(self._kz+qs))\n c41 = self.k*self._css*(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss*self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)\n c51 = -self._css *(self.k**2. *self._ss *(self._ss*self._cfs -self._s*self._cf)+ Gqs*self._cfs*(self._kz+qs))\n c12 = self.k * self._cfs *(self._kz + qs)\n c22 = Gqts *(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss *self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)\n c32 = self.k *self._ss*(self.k*self._cs*(self._ss*self._cfs - self._s*self._cf)+ self._s*(self._kz+qs))\n c42 = self.k*self._css*(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss*self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)\n c52 = -self._css *(self.k**2. *self._ss *(self._ss*self._cfs -self._s*self._cf)+ Gqts*self._cfs*(self._kz+qs))\n\n\n # now do final calculations ...\n q = self._kz\n qt = self.k * np.sqrt(self.eps - self._s**2.)\n\n vv = (1.+Rvi) *( -(1-Rvi) *c11 /q + (1.+Rvi) *c12 / qt)\n vv += (1.-Rvi) *( (1-Rvi) *c21 /q - (1.+Rvi) *c22 / qt)\n vv += (1.+Rvi) *( (1-Rvi) *c31 /q - (1.+Rvi) *c32 /self.eps /qt) \n vv += (1.-Rvi) *( (1+Rvi) *c41 /q - self.eps*(1. 
- Rvi) *c42 / qt)\n vv += (1.+Rvi) *( (1+Rvi) *c51 /q - (1.-Rvi) *c52 / qt)\n\n hh = (1.+Rhi) *( (1.-Rhi) * c11 /q - self.eps *(1.+Rhi) *c12 / qt)\n hh -= (1.-Rhi) *( (1.-Rhi) * c21 /q - (1.+Rhi) *c22 / qt)\n hh -= (1.+Rhi) *( (1.-Rhi) * c31 /q - (1.+Rhi) *c32 / qt)\n hh -= (1.-Rhi) *( (1.+Rhi) * c41 /q - (1.-Rhi) *c42 / qt)\n hh -= (1.+Rhi) *( (1.+Rhi) * c51 /q - (1.-Rhi) *c52 / qt)\n\n return vv, hh\n\n\n def _calc_r_transition(self):\n \"\"\" compute R transition \"\"\"\n\n Rv0 = (np.sqrt(self.eps)-1.) / (np.sqrt(self.eps) + 1.)\n Rh0 = -Rv0\n\n Ft = 8. * Rv0**2. + self._ss * (self._cs + np.sqrt(self.eps - self._s2))/(self._cs * np.sqrt(self.eps - self._s2))\n\n idx = np.arange(self.niter)+1\n a0 = (self.ks*self._cs)**(2.*idx)/self.fac\n a1 = np.sum(a0*self.wn)\n b1 = np.sum(a0 * (np.abs(Ft/2. + 2.**(idx+1) *Rv0/self._cs *np.exp(-(self.ks*self._cs)**2.)))**2. * self.wn)\n\n St = 0.25 * np.abs(Ft)**2. * a1/b1\n St0 = 1. / np.abs(1.+8.*Rv0/(self._cs * Ft))**2.\n Tf = 1. - St / St0\n\n return Rv0, Rh0, Tf\n\n def _calculate_average_reflection_coefficients(self):\n assert False, 'Not implemented yet!'\n #%----------- compute average reflection coefficients ------------\n #%-- these coefficients account for slope effects, especially near the\n #%brewster angle. They are not important if the slope is small.\n\n #sigx = 1.1 .*sig/L;\n #sigy = sigx;\n #xxx = 3*sigx;\n\n #Rav = dblquad(@(Zx, Zy)Rav_integration(Zx, Zy, cs,s,er,s2,sigx, sigy),-xxx,xxx, -xxx, xxx );\n\n #Rah = dblquad(@(Zx, Zy)Rah_integration(Zx, Zy, cs,s,er,s2,sigx, sigy),-xxx,xxx, -xxx, xxx );\n\n #Rav = Rav ./(2*pi * sigx * sigy);\n #Rah = Rah ./(2*pi * sigx * sigy);\n\n\n\n def calc_reflection_coefficients(self, Rvi, Rhi):\n\n\n Rv0, Rh0, Tf = self._calc_r_transition()\n\n\n # select proper reflection coefficients\n if self.mode == 'backscatter': # todo this comparison might slow down the program as it is called very often; perhaps modify\n Rvt = Rvi + (Rv0 - Rvi) * Tf\n Rht = Rhi + (Rh0 - Rhi) * Tf\n elif self.mode == 'bistatic':\n Rav = Rah = self._calculate_average_reflection_coefficients()\n Rvt = Rav\n Rht = Rah\n pass\n else:\n assert False\n\n return Rvt, Rht\n\n\nclass Roughness(object):\n \"\"\"\n calculate roughness spectrum\n \"\"\"\n def __init__(self, **kwargs):\n self.niter = kwargs.get('niter', None)\n self.l = kwargs.get('l', None)\n self.sig = kwargs.get('sig', None)\n self.theta = kwargs.get('theta', None)\n self.thetas = kwargs.get('thetas', None)\n self.phi = kwargs.get('phi', None)\n self.phis = kwargs.get('phis', None)\n self.freq = kwargs.get('freq', None)\n \n self._check()\n self.n = np.arange(self.niter)+1\n self._init()\n\n def wn(self):\n assert False, 'Should be implemented in child class!'\n\n def _init(self):\n ss = np.sin(self.thetas)\n self._s = np.sin(self.theta)\n sf = np.sin(self.phi)\n sfs = np.sin(self.phis)\n cfs = np.cos(self.phis)\n cf = np.cos(self.phi)\n lam = f2lam(self.freq)\n self.k = 2.*np.pi / lam\n self._kl = self.k*self.l\n self._kl2 = self._kl**2.\n \n # todo whereis this defined ???\n self.wvnb = self.k * np.sqrt( (ss *cfs - self._s *cf)**2. + (ss * sfs - self._s * sf)**2. 
)\n\n def _check(self):\n assert self.niter is not None, 'ERROR: niter was not set!'\n assert self.l is not None\n assert self.sig is not None \n assert self.theta is not None\n assert self.thetas is not None\n assert self.phi is not None\n assert self.phis is not None\n assert self.freq is not None\n\n\n\n@jit(cache=False, nopython=True)\ndef _calc_wn_matrix_gauss(rx, ry, nspec, kl2, s):\n wn = np.zeros(nspec)\n for i in xrange(nspec):\n wn[i] = 0.5 *kl2/(i+1.) * np.exp(-kl2*((rx-s)**2. + ry**2.)/(4.*(i+1))) \n return wn\n\n@jit(cache=False, nopython=True)\ndef _calc_wm_matrix_gauss(rx, ry, nspec, kl2, s):\n wm = np.zeros(nspec)\n for i in xrange(nspec):\n wm[i] = 0.5 *kl2/(i+1.) * np.exp(-kl2*((rx+s)**2. + ry**2.)/(4.*(i+1))) \n return wm\n\nclass GaussianSpectrum(Roughness):\n def __init__(self, **kwargs):\n super(GaussianSpectrum, self).__init__(**kwargs)\n \n def wn(self):\n # Fung (1994), Eq. 2B.4; except for wvnb\n n = self.n\n wn = (self.l**2.)/(2.*n) * np.exp(-(self.wvnb*self.l)**2. / (4.*n))\n rss = np.sqrt(2.)*self.sig/self.l\n return wn, rss\n\n def calc_wn_matrix(self, rx, ry, nspec):\n return _calc_wn_matrix_gauss(rx, ry, nspec, self._kl2, self._s)\n\n def calc_wm_matrix(self, rx, ry, nspec):\n\n return _calc_wm_matrix_gauss(rx, ry, nspec, self._kl2, self._s)\n\n@jit(cache=True,nopython=True)\ndef _calc_wn_matrix_exp(rx, ry, nspec, kl2, s):\n wn = np.zeros(nspec)\n for i in xrange(nspec):\n wn[i] = (i+1) * kl2 / ((i+1)**2.+kl2*((rx-s)**2. + ry**2.))**1.5\n return wn\n\n@jit(cache=True,nopython=True)\ndef _calc_wm_matrix_exp(rx, ry, nspec, kl2, s):\n wm = np.zeros(nspec)\n for i in xrange(nspec):\n wm[i] = (i+1) * kl2 / ((i+1)**2.+kl2*((rx+s)**2. + ry**2.))**1.5\n return wm\n\n\nclass ExponentialSpectrum(Roughness):\n \"\"\"\n exponential spectrum\n \"\"\"\n def __init__(self, **kwargs):\n super(ExponentialSpectrum, self).__init__(**kwargs)\n\n def wn(self):\n # Fung (1994): eq. 2.B.14\n n = self.n\n wn= self.l**2. / n**2. * (1.+(self.wvnb*self.l/n)**2.)**(-1.5)\n rss = self.sig/self.l\n return wn, rss\n \n def calc_wn_matrix(self, rx, ry, nspec):\n #for i in xrange(nspec):\n # n = i+1\n #return np.array([(i+1) * self._kl2 / ((i+1)**2.+self._kl2*((rx-self._s)**2. + ry**2.))**1.5 for i in xrange(nspec)])\n return _calc_wn_matrix_gauss(rx, ry, nspec, self._kl2, self._s)\n\n def calc_wm_matrix(self, rx, ry, nspec):\n #return np.array([(i+1) * self._kl2 / ((i+1)**2.+self._kl2*((rx+self._s)**2. + ry**2.))**1.5 for i in xrange(nspec)])\n return _calc_wm_matrix_gauss(rx, ry, nspec, self._kl2, self._s)\n\n\n\n",
"\"\"\"\ncomparison with figure 10-11 \nin Ulaby 2014\n\nfor copol this works pretty good.\nonly very slight deviations for very large incidence angles\n\"\"\"\n\nimport sys\nimport os\nsys.path.append(os.path.abspath(os.path.dirname(__file__)) + os.sep + '..')\n\nimport numpy as np\n\nfrom sense.surface import I2EM\n\nimport matplotlib.pyplot as plt\n\ndef db(x):\n return 10.*np.log10(x)\n\n\nplt.close('all')\n\n\ntheta_deg = np.linspace(0.,70.)\ntheta = np.deg2rad(theta_deg)\n\nf = plt.figure()\nax = f.add_subplot(111)\n\n\neps = 11.3-1.5j\nf = 3.\nacf_type='exp15'\n\n\n\n\ns1 = 0.5/100.\ns2 = 1.5/100.\nl = 0.1\n\n\n\nhh1=[]\nhh2=[]\nvv1=[]\nvv2=[]\nhv1=[]\nhv2=[]\nfor t in theta:\n I1 = I2EM(f, eps, s1, l, t, acf_type=acf_type, xpol=True)\n I2 = I2EM(f, eps, s2, l, t, acf_type=acf_type, xpol=True)\n hh1.append(I1.hh)\n hh2.append(I2.hh)\n vv1.append(I1.vv)\n vv2.append(I2.vv)\n #hv1.append(I1.hv)\n #hv2.append(I2.hv)\n\nhh1 = np.array(hh1)\nhh2 = np.array(hh2)\nvv1 = np.array(vv1)\nvv2 = np.array(vv2)\nhv1 = np.array(hv1)\nhv2 = np.array(hv2)\n\n\n\nax.plot(theta_deg, db(hh2), color='red', label='hh')\nax.plot(theta_deg, db(hh1), color='blue', label='hh')\n\nax.plot(theta_deg, db(vv2), color='red', label='vv', linestyle='--')\nax.plot(theta_deg, db(vv1), color='blue', label='vv', linestyle='--')\n\n#ax.plot(theta_deg, db(hv2), color='red', label='hv', linestyle='.')\n#ax.plot(theta_deg, db(hv1), color='blue', label='hv', linestyle='.')\n\nax.grid()\nax.set_xlim(0.,70.)\nax.set_ylim(-50.,30.)\n\nplt.show()\n"
] |
[
[
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.deg2rad",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.linspace",
"numpy.deg2rad",
"numpy.log10",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
TemugeB/bodypose3d
|
[
"63b74d4af73445be8e039bfef4fcb5b0c55eb3e2"
] |
[
"show_3d_pose.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import DLT\nplt.style.use('seaborn')\n\n\npose_keypoints = np.array([16, 14, 12, 11, 13, 15, 24, 23, 25, 26, 27, 28])\n\ndef read_keypoints(filename):\n fin = open(filename, 'r')\n\n kpts = []\n while(True):\n line = fin.readline()\n if line == '': break\n\n line = line.split()\n line = [float(s) for s in line]\n\n line = np.reshape(line, (len(pose_keypoints), -1))\n kpts.append(line)\n\n kpts = np.array(kpts)\n return kpts\n\n\ndef visualize_3d(p3ds):\n\n \"\"\"Now visualize in 3D\"\"\"\n torso = [[0, 1] , [1, 7], [7, 6], [6, 0]]\n armr = [[1, 3], [3, 5]]\n arml = [[0, 2], [2, 4]]\n legr = [[6, 8], [8, 10]]\n legl = [[7, 9], [9, 11]]\n body = [torso, arml, armr, legr, legl]\n colors = ['red', 'blue', 'green', 'black', 'orange']\n\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for framenum, kpts3d in enumerate(p3ds):\n if framenum%2 == 0: continue #skip every 2nd frame\n for bodypart, part_color in zip(body, colors):\n for _c in bodypart:\n ax.plot(xs = [kpts3d[_c[0],0], kpts3d[_c[1],0]], ys = [kpts3d[_c[0],1], kpts3d[_c[1],1]], zs = [kpts3d[_c[0],2], kpts3d[_c[1],2]], linewidth = 4, c = part_color)\n\n #uncomment these if you want scatter plot of keypoints and their indices.\n # for i in range(12):\n # #ax.text(kpts3d[i,0], kpts3d[i,1], kpts3d[i,2], str(i))\n # #ax.scatter(xs = kpts3d[i:i+1,0], ys = kpts3d[i:i+1,1], zs = kpts3d[i:i+1,2])\n\n\n #ax.set_axis_off()\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n\n ax.set_xlim3d(-10, 10)\n ax.set_xlabel('x')\n ax.set_ylim3d(-10, 10)\n ax.set_ylabel('y')\n ax.set_zlim3d(-10, 10)\n ax.set_zlabel('z')\n plt.pause(0.1)\n ax.cla()\n\n\nif __name__ == '__main__':\n\n p3ds = read_keypoints('kpts_3d.dat')\n visualize_3d(p3ds)\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
workproduct/magenta
|
[
"ba43c3e1a2b3b6a5731fa10a5a6bddd0c821eb84"
] |
[
"magenta/models/latent_transfer/sample_dataspace.py"
] |
[
"# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sample from pre-trained VAE on dataspace.\n\nThis script provides sampling from VAE on dataspace trained using\n`train_dataspace.py`. The main purpose is to help manually check the quality\nof model on dataspace.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport os\n\nfrom magenta.models.latent_transfer import common\nfrom magenta.models.latent_transfer import model_dataspace\nimport numpy as np\nimport tensorflow as tf\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string('config', 'mnist_0',\n 'The name of the model config to use.')\ntf.flags.DEFINE_string('exp_uid', '_exp_0',\n 'String to append to config for filenames/directories.')\ntf.flags.DEFINE_integer('random_seed', 19260817, 'Random seed.')\n\n\ndef main(unused_argv):\n del unused_argv\n\n # Load Config\n config_name = FLAGS.config\n config_module = importlib.import_module('configs.%s' % config_name)\n config = config_module.config\n model_uid = common.get_model_uid(config_name, FLAGS.exp_uid)\n n_latent = config['n_latent']\n\n # Load dataset\n dataset = common.load_dataset(config)\n basepath = dataset.basepath\n save_path = dataset.save_path\n train_data = dataset.train_data\n\n # Make the directory\n save_dir = os.path.join(save_path, model_uid)\n best_dir = os.path.join(save_dir, 'best')\n tf.gfile.MakeDirs(save_dir)\n tf.gfile.MakeDirs(best_dir)\n tf.logging.info('Save Dir: %s', save_dir)\n\n # Set random seed\n np.random.seed(FLAGS.random_seed)\n tf.set_random_seed(FLAGS.random_seed)\n\n # Load Model\n tf.reset_default_graph()\n sess = tf.Session()\n with tf.device(tf.train.replica_device_setter(ps_tasks=0)):\n m = model_dataspace.Model(config, name=model_uid)\n _ = m() # noqa\n\n # Initialize\n sess.run(tf.global_variables_initializer())\n\n # Load\n m.vae_saver.restore(sess,\n os.path.join(best_dir, 'vae_best_%s.ckpt' % model_uid))\n\n # Sample from prior\n sample_count = 64\n\n image_path = os.path.join(basepath, 'sample', model_uid)\n tf.gfile.MakeDirs(image_path)\n\n # from prior\n z_p = np.random.randn(sample_count, m.n_latent)\n x_p = sess.run(m.x_mean, {m.z: z_p})\n x_p = common.post_proc(x_p, config)\n common.save_image(\n common.batch_image(x_p), os.path.join(image_path, 'sample_prior.png'))\n\n # Sample from priro, as Grid\n boundary = 2.0\n number_grid = 50\n blob = common.make_grid(\n boundary=boundary, number_grid=number_grid, dim_latent=n_latent)\n z_grid, dim_grid = blob.z_grid, blob.dim_grid\n x_grid = sess.run(m.x_mean, {m.z: z_grid})\n x_grid = common.post_proc(x_grid, config)\n batch_image_grid = common.make_batch_image_grid(dim_grid, number_grid)\n common.save_image(\n batch_image_grid(x_grid), os.path.join(image_path, 'sample_grid.png'))\n\n # Reconstruction\n sample_count = 64\n x_real = train_data[:sample_count]\n mu, sigma = sess.run([m.mu, m.sigma], {m.x: x_real})\n x_rec = sess.run(m.x_mean, {m.mu: mu, 
m.sigma: sigma})\n x_rec = common.post_proc(x_rec, config)\n\n x_real = common.post_proc(x_real, config)\n common.save_image(\n common.batch_image(x_real), os.path.join(image_path, 'image_real.png'))\n common.save_image(\n common.batch_image(x_rec), os.path.join(image_path, 'image_rec.png'))\n\n\nif __name__ == '__main__':\n tf.app.run(main)\n"
] |
[
[
"numpy.random.seed",
"tensorflow.flags.DEFINE_string",
"tensorflow.train.replica_device_setter",
"tensorflow.global_variables_initializer",
"tensorflow.gfile.MakeDirs",
"tensorflow.logging.info",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"numpy.random.randn",
"tensorflow.set_random_seed",
"tensorflow.flags.DEFINE_integer",
"tensorflow.app.run"
]
] |
fredhatt/estimsoundsynth
|
[
"04ff6697cc0ce329c013382941b1f9839e910114"
] |
[
"randtrend.py"
] |
[
"import numpy as np\nfrom ssynth import *\n\n# 1. Establish the overall trend (volume)\n\nendtime = 120 # seconds\ndt = 1 # seconds\nt = np.linspace(0, endtime, float(endtime)/dt)\ntrend = np.sin(t*50) + t/15 + np.sin(t*250)\ntrend /= np.max(trend)\ntrend *= 32767\n\n# 2. Establish the frequencies\n\nminF = 15\nmaxF = 300\nsigmaF = (maxF-minF)/10.\nftrend = [np.random.randint(minF, maxF/10)]\nfor i in range(len(t)): \n newF = ftrend[-1] + np.random.randn()*sigmaF\n if newF > maxF: newF = maxF\n if newF < minF: newF = minF\n ftrend.append(newF)\n\nfig, ax1 = sns.plt.subplots()\nax1.plot(trend, c='b')\nax1.set_ylabel('Trend')\n\nax2 = ax1.twinx()\nax2.plot(ftrend, c='r')\nax2.set_ylabel('Frequency (Hz)')\nsns.plt.savefig('trend.png')\n\n# 3. Generate sounds\n\nstream = np.zeros(1)\nfor idx_t in range(len(t)):\n this_freq = ftrend[idx_t]\n this_amp = trend[idx_t]\n duration = np.random.randint(1, 6)\n \n silence = pause(np.random.rand()*3 + 0.1) \n note = square(this_freq, len=duration, amp=this_amp)\n if np.random.randint(0, 1) == 0:\n note = modulate_fadein(note, in_by=0.05*duration)\n else:\n note = modulate_gaussian(note, c=0.2)\n\n stream = np.append( stream, note )\n stream = np.append( stream, silence )\n\n # for the next iteration\n idx_t += duration - 1\n \nplot_waveform(stream)\nwrite_waveform(stream, 'audio.wav')\n\n\n"
] |
[
[
"numpy.sin",
"numpy.max",
"numpy.append",
"numpy.random.randn",
"numpy.random.rand",
"numpy.zeros",
"numpy.random.randint"
]
] |
waitxxxx/TecoGAN-PyTorch
|
[
"15d87000ed35d2037317144f54fa0e3d64f34e2f",
"15d87000ed35d2037317144f54fa0e3d64f34e2f"
] |
[
"codes/data/paired_folder_dataset.py",
"codes/metrics/model_summary.py"
] |
[
"import os\nimport os.path as osp\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom .base_dataset import BaseDataset\nfrom utils.base_utils import retrieve_files\n\n\nclass PairedFolderDataset(BaseDataset):\n def __init__(self, data_opt, **kwargs):\n \"\"\" Folder dataset for paired data. It supports both BI & BD degradation.\n \"\"\"\n super(PairedFolderDataset, self).__init__(data_opt, **kwargs)\n\n # get keys\n gt_keys = sorted(os.listdir(self.gt_seq_dir))\n lr_keys = sorted(os.listdir(self.lr_seq_dir))\n self.keys = sorted(list(set(gt_keys) & set(lr_keys)))\n\n # filter keys\n sel_keys = set(self.keys)\n if hasattr(self, 'filter_file') and self.filter_file is not None:\n with open(self.filter_file, 'r') as f:\n sel_keys = {line.strip() for line in f}\n elif hasattr(self, 'filter_list') and self.filter_list is not None:\n sel_keys = set(self.filter_list)\n self.keys = sorted(list(sel_keys & set(self.keys)))\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, item):\n key = self.keys[item]\n\n # load gt frames\n gt_seq = []\n for frm_path in retrieve_files(osp.join(self.gt_seq_dir, key)):\n frm = cv2.imread(frm_path)[..., ::-1]\n gt_seq.append(frm)\n gt_seq = np.stack(gt_seq) # thwc|rgb|uint8\n\n # load lr frames\n lr_seq = []\n for frm_path in retrieve_files(osp.join(self.lr_seq_dir, key)):\n frm = cv2.imread(frm_path)[..., ::-1].astype(np.float32) / 255.0\n lr_seq.append(frm)\n lr_seq = np.stack(lr_seq) # thwc|rgb|float32\n\n # convert to tensor\n gt_tsr = torch.from_numpy(np.ascontiguousarray(gt_seq)) # uint8\n lr_tsr = torch.from_numpy(np.ascontiguousarray(lr_seq)) # float32\n\n # gt: thwc|rgb||uint8 | lr: thwc|rgb|float32\n return {\n 'gt': gt_tsr,\n 'lr': lr_tsr,\n 'seq_idx': key,\n 'frm_idx': sorted(os.listdir(osp.join(self.gt_seq_dir, key)))\n }\n",
"import torch\nimport torch.nn as nn\n\n\n# define which modules to be incorporated\nregistered_module = [\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.Conv3d\n]\n\n# initialize\nregistered_hooks, model_info_lst = [], []\n\n\ndef calc_2d_gflops_per_batch(module, out_h, out_w):\n \"\"\" Calculate flops of conv weights (support groups_conv & dilated_conv)\n \"\"\"\n gflops = 0\n if hasattr(module, 'weight'):\n # Note: in_c is already divided by groups while out_c is not\n bias = 0 if hasattr(module, 'bias') else -1\n out_c, in_c, k_h, k_w = module.weight.shape\n\n gflops += (2*in_c*k_h*k_w + bias)*out_c*out_h*out_w/1e9\n return gflops\n\n\ndef calc_3d_gflops_per_batch(module, out_d, out_h, out_w):\n \"\"\" Calculate flops of conv weights (support groups_conv & dilated_conv)\n \"\"\"\n gflops = 0\n if hasattr(module, 'weight'):\n # Note: in_c is already divided by groups while out_c is not\n bias = 0 if hasattr(module, 'bias') else -1\n out_c, in_c, k_d, k_h, k_w = module.weight.shape\n\n gflops += (2*in_c*k_d*k_h*k_w + bias)*out_c*out_d*out_h*out_w/1e9\n return gflops\n\n\ndef hook_fn_forward(module, input, output):\n if isinstance(module, nn.Conv3d):\n batch_size, _, out_d, out_h, out_w = output.size()\n gflops = batch_size*calc_3d_gflops_per_batch(module, out_d, out_h, out_w)\n else:\n if isinstance(module, nn.ConvTranspose2d):\n batch_size, _, out_h, out_w = input[0].size()\n else:\n batch_size, _, out_h, out_w = output.size()\n gflops = batch_size*calc_2d_gflops_per_batch(module, out_h, out_w)\n\n model_info_lst.append({'gflops': gflops})\n\n\ndef register_hook(module):\n if isinstance(module, tuple(registered_module)):\n registered_hooks.append(module.register_forward_hook(hook_fn_forward))\n\n\ndef register(model, dummy_input_list):\n # reset params\n global registered_hooks, model_info_lst\n registered_hooks, model_info_lst = [], []\n\n # register hook\n model.apply(register_hook)\n\n # forward\n with torch.no_grad():\n model.eval()\n out = model(*dummy_input_list)\n\n # remove hooks\n for hook in registered_hooks:\n hook.remove()\n\n return out\n\n\ndef parse_model_info(model):\n tot_gflops = 0\n for module_info in model_info_lst:\n if module_info['gflops']:\n tot_gflops += module_info['gflops']\n\n tot_params = 0\n for param in model.parameters():\n tot_params += torch.prod(torch.tensor(param.size())).item()\n\n return tot_gflops, tot_params\n"
] |
[
[
"numpy.ascontiguousarray",
"numpy.stack"
],
[
"torch.no_grad"
]
] |
arghdos/SPyJac-paper
|
[
"7f65253a3acd3a93141e673c2cdd5810ecc6a0ca"
] |
[
"scripts/source_term_error.py"
] |
[
"import os\nimport numpy as np\nfrom load_error_files import print_error\n\nrtol = 1e-6\natol = 1e-10\n\n\ndef updater(err_dict, err, filename=None, mech_info={}):\n def __get_size(name):\n if 'rop' in name:\n if 'net' in name or 'fwd' in name:\n return mech_info['n_reactions']\n else:\n return mech_info['n_reversible']\n elif 'wdot' in name:\n return mech_info['n_species']\n elif 'phi' in name:\n return mech_info['n_species'] + 1\n\n for name in err:\n if 'value' in name or 'component' in name or 'store' in name:\n continue\n errs = err[name]\n values = err[name + '_value']\n errs = errs / (atol + rtol * np.abs(values))\n if ('rop_fwd' == name or 'rop_rev' == name) and 'store' in name and np.any(\n errs > 1e-4):\n from time import ctime\n print(filename, ctime(os.path.getmtime(filename)))\n print('Bad data detected...')\n\n precs = None\n if 'rop_net' in name:\n # calculate the precision norms\n precs = err['rop_component'] / (atol + rtol * np.abs(values))\n\n if name not in err_dict:\n err_dict[name] = np.zeros((__get_size(name)))\n\n if ('rop_fwd' in name or 'rop_rev' in name):\n if mech_info['n_cheb']:\n err_dict[name + '_nocheb'] = np.zeros((__get_size(name)))\n if mech_info['n_plog']:\n err_dict[name + '_noplog'] = np.zeros((__get_size(name)))\n\n if precs is not None:\n err_dict['rop_component'] = np.zeros((__get_size(\n 'rop_net')))\n\n if errs.shape != err_dict[name].shape:\n # check that we have a split\n assert 'C_w' in filename or 'F_d' in filename\n # discard extra zeros resulting from split padding\n errs = errs[:__get_size(name)]\n\n err_dict[name] = np.maximum(\n err_dict[name], errs)\n\n def __update_rxn_type(rxn_str):\n if ('rop_fwd' == name or 'rop_rev' == name) and \\\n mech_info['n_' + rxn_str]:\n inds = mech_info[name + '_' + rxn_str + '_inds']\n err_dict[name + '_no' + rxn_str][inds] = np.maximum(\n err_dict[name + '_no' + rxn_str][inds], errs[inds])\n\n __update_rxn_type('cheb')\n __update_rxn_type('plog')\n\n if 'rop_net' in name:\n update_locs = np.where(err_dict[name] == errs)\n # update the precision norms at these locations\n err_dict['rop_component'][\n update_locs] = precs[update_locs]\n\n\ndef format(val):\n return '{:1.2e}'.format(val)\n\n\ndef printer(err_dict):\n keyarr = ['fwd', 'rev', 'net', 'comp', 'phi']\n for name in sorted(err_dict, key=lambda x: keyarr.index(next(\n y for y in keyarr if y in x))):\n err_vals = err_dict[name][np.where(np.logical_not(\n np.isnan(err_dict[name])))]\n if 'phi' in name:\n print('tdot', format(err_vals[0]))\n print('edot', format(err_vals[1]))\n print('species', format(np.linalg.norm(err_vals[2:], ord=np.inf)))\n elif 'rop_net' in name:\n # find prevision range\n maxv = np.linalg.norm(err_vals, ord=np.inf)\n maxind = np.where(err_dict[name] == maxv)[0][0]\n print(name, format(maxv))\n print('rop_component', format(\n err_dict['rop_component'][maxind]))\n elif 'component' not in name:\n print(name, format(np.linalg.norm(err_vals, ord=np.inf)))\n\n\nprint_error('spec', updater, printer)\n"
] |
[
[
"numpy.maximum",
"numpy.abs",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.any",
"numpy.where"
]
] |
skadio/mabwiser
|
[
"1589b771d95e65540f0b42c7112e32e227adf7b2"
] |
[
"tests/test_mab.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy\nfrom tests.test_base import BaseTest\n\n\nclass MABTest(BaseTest):\n\n #################################################\n # Test context free predict() method\n ################################################\n\n def test_arm_list_int(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_arm_list_str(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[\"A\", \"B\", \"C\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\", \"C\", \"C\"],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[\"A\", \"B\", \"C\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\", \"C\", \"C\"],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n 
self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_series_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_decision_array_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n 
is_predict=True)\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n\n #################################################\n # Test context free predict_expectation() method\n ################################################\n\n def test_exp_arm_list_int(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_arm_list_str(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[\"A\", \"B\", \"C\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\", \"C\", \"C\"],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_series_reward_array(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_exp_decision_array_reward_series(self):\n\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3],\n decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),\n rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n def test_context_history_series(self):\n\n contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})\n\n for lp in BaseTest.para_lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1],\n rewards=[0, 0, 0],\n 
learning_policy=lp,\n context_history=contexts['column1'],\n contexts=[[1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)\n\n for cp in BaseTest.nps:\n for lp in BaseTest.lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1],\n rewards=[0, 0, 0],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=contexts['column1'],\n contexts=[[1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(np.ndim(mab._imp.contexts), 2)\n\n for cp in BaseTest.cps:\n for lp in BaseTest.lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1],\n rewards=[0, 0, 0],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=contexts['column1'],\n contexts=[[1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(np.ndim(mab._imp.contexts), 2)\n\n def test_context_series(self):\n\n contexts = pd.DataFrame({'column1': [1, 2, 3, 3, 2, 1], 'column2': [2, 3, 1, 1, 2, 3]})\n\n for lp in BaseTest.para_lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1, 1, 1, 1],\n rewards=[0, 0, 0, 0, 0, 0],\n learning_policy=lp,\n context_history=contexts['column1'],\n contexts=pd.Series([1]),\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)\n\n for cp in BaseTest.nps:\n for lp in BaseTest.lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1, 1, 1, 1],\n rewards=[0, 0, 0, 0, 0, 0],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=contexts['column1'],\n contexts=pd.Series([1]),\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(np.ndim(mab._imp.contexts), 2)\n\n for cp in BaseTest.cps:\n for lp in BaseTest.lps:\n arm, mab = self.predict(arms=[0, 1],\n decisions=[1, 1, 1, 1, 1, 1],\n rewards=[0, 0, 0, 0, 0, 0],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=contexts['column1'],\n contexts=pd.Series([1]),\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(np.ndim(mab._imp.contexts), 2)\n\n #################################################\n # Test contextual predict() method\n ################################################\n\n def test_context_arm_list_int(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 
0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_arm_list_str(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, -2, 2, 3, 11], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, -5, 2, 3, 10], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, -2, 4, 3, 9], [20, 19, 18, 17, 16], [1, 2, 1, 1, 3],\n [17, 18, 17, 19, 18]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_decision_series(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_reward_series(self):\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 
4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_decision_reward_series(self):\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_decision_array(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n 
is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_reward_array(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n def test_context_decision_reward_array(self):\n\n for lp in MABTest.para_lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 
1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n self.predict(arms=[1, 2, 3, 4],\n decisions=np.array([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n #################################################\n # Test random generator\n ################################################\n def test_seed(self):\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.25),\n seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [3, 3, 3, 3])\n self.assertIs(mab._rng, mab._imp.rng)\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.25),\n seed=7,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [2, 3, 3, 3])\n self.assertIs(mab._rng, mab._imp.rng)\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.25),\n seed=79,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [3, 3, 3, 2])\n self.assertIs(mab._rng, mab._imp.rng)\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.33),\n seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [3, 3, 3, 3])\n self.assertIs(mab._rng, mab._imp.rng)\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.33),\n seed=7,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [2, 1, 1, 3])\n self.assertIs(mab._rng, mab._imp.rng)\n\n arms, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.33),\n seed=79,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arms), 4)\n self.assertEqual(arms, [3, 3, 3, 2])\n self.assertIs(mab._rng, mab._imp.rng)\n\n def test_set_rng(self):\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp)\n self.assertIs(mab._rng, mab._imp.rng)\n\n for lp in MABTest.para_lps:\n mab = MAB([0, 1], lp)\n self.assertIs(mab._rng, mab._imp.rng)\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp, cp)\n self.assertIs(mab._rng, mab._imp.rng)\n self.assertIs(mab._rng, mab._imp.lp.rng)\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp, cp)\n self.assertIs(mab._rng, mab._imp.rng)\n self.assertIs(mab._rng, mab._imp.lp_list[0].rng)\n\n #################################################\n # Test add_arm() method\n 
################################################\n\n def test_add_arm(self):\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp)\n mab.add_arm(2)\n self.assertTrue(2 in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 3)\n self.assertTrue(2 in mab._imp.arm_to_expectation.keys())\n\n mab.add_arm('a')\n self.assertTrue('a' in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 4)\n self.assertTrue('a' in mab._imp.arm_to_expectation.keys())\n\n def test_add_arm_contextual(self):\n for lp in MABTest.para_lps:\n mab = MAB([0, 1], lp)\n mab.add_arm(2)\n self.assertTrue(2 in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 3)\n self.assertTrue(2 in mab._imp.arm_to_expectation.keys())\n\n mab.add_arm('a')\n self.assertTrue('a' in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 4)\n self.assertTrue('a' in mab._imp.arm_to_expectation.keys())\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp, cp)\n mab.add_arm(2)\n self.assertTrue(2 in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 3)\n self.assertTrue(len(mab._imp.lp.arms) == 3)\n self.assertTrue(2 in mab._imp.lp.arm_to_expectation.keys())\n\n mab.add_arm('a')\n self.assertTrue('a' in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 4)\n self.assertTrue(len(mab._imp.lp.arms) == 4)\n self.assertTrue('a' in mab._imp.lp.arm_to_expectation.keys())\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n mab = MAB([0, 1], lp, cp)\n mab.add_arm(2)\n self.assertTrue(2 in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 3)\n self.assertTrue(len(mab._imp.lp_list[0].arms) == 3)\n self.assertTrue(2 in mab._imp.lp_list[0].arm_to_expectation.keys())\n\n mab.add_arm('a')\n self.assertTrue('a' in mab.arms)\n self.assertTrue(len(mab._imp.arms) == 4)\n self.assertTrue(len(mab._imp.lp_list[0].arms) == 4)\n self.assertTrue('a' in mab._imp.lp_list[0].arm_to_expectation.keys())\n\n #################################################\n # Test partial_fit() method\n ################################################\n\n def test_partial_fit(self):\n for lp in MABTest.lps:\n arm, mab = self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n seed=123456,\n num_run=1,\n is_predict=True)\n mab.partial_fit([\"A\", \"B\"], [0, 0])\n\n def test_partial_fit_contextual(self):\n for lp in MABTest.para_lps:\n arm, mab = self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n mab.partial_fit([\"A\", \"B\"], [0, 0], [[1, 3, 1, 1, 1], [0, 0, 0, 0, 0]])\n\n for cp in MABTest.nps:\n for lp in MABTest.lps:\n arm, mab = self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n 
is_predict=True)\n mab.partial_fit([\"A\", \"B\"], [0, 0], [[1, 3, 1, 1, 1], [0, 0, 0, 0, 0]])\n\n for cp in MABTest.cps:\n for lp in MABTest.lps:\n arm, mab = self.predict(arms=[\"A\", \"B\", \"C\", \"D\"],\n decisions=[\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"C\"],\n rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=lp,\n neighborhood_policy=cp,\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n mab.partial_fit([\"A\", \"B\"], [0, 0], [[1, 3, 1, 1, 1], [0, 0, 0, 0, 0]])\n\n def test_partial_fit_without_fit(self):\n\n for lp in BaseTest.lps:\n mab = MAB([1, 2], lp)\n mab.partial_fit([1, 2], [0, 1])\n x1 = mab.predict()\n\n mab = MAB([1, 2], lp)\n mab.fit([1, 2], [0, 1])\n x2 = mab.predict()\n\n self.assertEqual(x1, x2)\n\n for para_lp in BaseTest.para_lps:\n mab = MAB([1, 2], para_lp)\n mab.partial_fit([1, 2], [0, 1], [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]])\n x1 = mab.predict([[0, 10, -2, 4, 2]])\n\n mab = MAB([1, 2], para_lp)\n mab.fit([1, 2], [0, 1], [[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]])\n x2 = mab.predict([[0, 10, -2, 4, 2]])\n\n self.assertEqual(x1, x2)\n\n for cp in BaseTest.nps:\n for lp in BaseTest.lps:\n mab = MAB([1, 2], lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 1, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0]])\n x1 = mab.predict([[0, 10, -2, 4, 2]])\n\n mab = MAB([1, 2], lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 1, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0]])\n x2 = mab.predict([[0, 10, -2, 4, 2]])\n\n self.assertEqual(x1, x2)\n\n for cp in BaseTest.nps:\n for para_lp in BaseTest.para_lps:\n mab = MAB([1, 2], para_lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n\n x1 = mab.predict([[0, 0, 0, 0, 0], ])\n\n mab = MAB([1, 2], para_lp, cp)\n mab.fit([1, 2, 2], [0, 1, 0], [[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n x2 = mab.predict([[0, 0, 0, 0, 0]])\n\n self.assertEqual(x1, x2)\n\n for cp in BaseTest.cps:\n for lp in BaseTest.lps:\n mab = MAB([1, 2], lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 9, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [-3, 0, 0, -7, 0]])\n x1 = mab.predict([[0, 10, -2, 4, 2]])\n\n mab = MAB([1, 2], lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 9, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [-3, 0, 0, -7, 0]])\n x2 = mab.predict([[0, 10, -2, 4, 2]])\n\n self.assertEqual(x1, x2)\n\n for cp in BaseTest.cps:\n for para_lp in BaseTest.para_lps:\n mab = MAB([1, 2], para_lp, cp)\n mab.partial_fit([1, 2, 2], [0, 1, 0], [[0, 9, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [-3, 0, 0, -7, 0]])\n\n x1 = mab.predict([[0, 0, 0, 0, 0]])\n\n mab = MAB([1, 2], para_lp, cp)\n mab.fit([1, 2, 2], [0, 1, 0], [[0, 9, 2, 3, 5],\n [1, 1, 1, 1, 1],\n [-3, 0, 0, -7, 0]])\n x2 = mab.predict([[0, 0, 0, 0, 0]])\n\n self.assertEqual(x1, x2)\n\n def test_partial_fit_single_row(self):\n rng = np.random.RandomState(seed=9)\n train_data = pd.DataFrame({'a': [rng.rand() for _ in range(20)],\n 'b': [rng.rand() for _ in range(20)],\n 'c': [rng.rand() for _ in range(20)],\n 'decision': [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],\n 'reward': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1]})\n test_data = pd.DataFrame({'a': [rng.rand() for _ in range(3)], 'b': [rng.rand() for _ in range(3)],\n 'c': [rng.rand() for _ in range(3)], 
'decision': [ 1, 1, 2], 'reward': [ 0, 1, 1]})\n context_columns = ['a', 'b', 'c']\n\n for para_lp in BaseTest.para_lps:\n mab = MAB([1, 2], para_lp)\n mab.fit(train_data['decision'], train_data['reward'], train_data[context_columns])\n for index, row in test_data.iterrows():\n mab.predict(row[context_columns])\n mab.partial_fit([row['decision']], [row['reward']], row[context_columns])\n\n for cp in BaseTest.nps:\n for lp in BaseTest.lps:\n mab = MAB([1, 2], lp, cp)\n mab.fit(train_data['decision'], train_data['reward'], train_data[context_columns])\n for index, row in test_data.iterrows():\n mab.predict(row[context_columns])\n mab.partial_fit([row['decision']], [row['reward']], row[context_columns])\n\n # With neighbors based approaches it is difficult to guarantee that\n for cp in BaseTest.nps:\n for para_lp in BaseTest.para_lps:\n mab = MAB([1, 2], para_lp, cp)\n mab.fit(train_data['decision'], train_data['reward'], train_data[context_columns])\n for index, row in test_data.iterrows():\n mab.predict(row[context_columns])\n mab.partial_fit([row['decision']], [row['reward']], row[context_columns])\n\n def test_convert_matrix(self):\n a = np.array([[1, 2, 3], [2, 2, 2]])\n b = [[1, 2, 3], [2, 2, 2]]\n c = pd.DataFrame({'one': [1, 2, 3], 'two': [2, 2, 2]})\n d = np.array([[1, 2, 3], [2, 2, 2]], order='F')\n\n MAB._convert_matrix(None)\n MAB._convert_matrix(a)\n MAB._convert_matrix(b)\n MAB._convert_matrix(c)\n MAB._convert_matrix(c['one'])\n MAB._convert_matrix(c.loc[0], row=True)\n MAB._convert_matrix(d)\n\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame",
"numpy.ndim",
"numpy.array",
"numpy.random.RandomState"
]
] |
mneeleman/xastropy
|
[
"790aebfbc5cd26d43281bb70dec6bc63007dbb87"
] |
[
"xastropy/stats/basic.py"
] |
[
 "\"\"\"\n#;+ \n#; NAME:\n#; stats.basic\n#; Version 1.0\n#;\n#; PURPOSE:\n#; Module for basic stat calculations\n#; 04-Dec-2014 by JXP\n#;-\n#;------------------------------------------------------------------------------\n\"\"\"\nfrom __future__ import print_function, absolute_import, division, unicode_literals\n\nimport numpy as np\nimport os\nfrom scipy.interpolate import interp1d\n\nfrom xastropy.xutils import xdebug as xdb\n\n# def perc\n# def lin_to_log\n\ndef lin_to_log(x, sig):\n \"\"\" Convert linear value+error to log \n\n Parameters:\n x: float\n sig: float \n\n Returns:\n logx, sig_logx\n Log value and error in log\n\n JXP 26 Mar 2015\n \"\"\"\n logx = np.log10( x ) \n lgvar = ((1. / (np.log(10.0)*x))**2) * sig**2\n sig_logx = np.sqrt(lgvar)\n\n return logx, sig_logx\n\ndef perc(x, per=0.68):\n \"\"\" Calculate the percentile bounds of a distribution\n\n Parameters:\n x: float\n numpy array of values\n per: float (0.68)\n Percentile for the calculation\n\n Returns:\n xper: array\n Value at lower, value at upper\n\n JXP 04 Dec 2014\n \"\"\"\n #\n npt = len(x)\n\n # Sort\n xsort = np.sort(x)\n perx = (np.arange(npt)+1) / npt\n\n f = interp1d(perx,xsort)\n\n frac = (1.-per) / 2.\n\n # Fill\n xper = np.zeros(2)\n try:\n xper[0] = f( frac )\n except ValueError:\n xper[0] = np.min(x)\n\n try:\n xper[1] = f( 1.-frac )\n except ValueError:\n xper[1] = np.max(x)\n\n #xdb.set_trace()\n\n # Return\n return xper\n\ndef poisson_interval(k, cl=0.95, sigma=None): \n \"\"\"Uses chisquared info to get the poisson interval. Uses scipy.stats\n (imports in function). \n Taken from http://stackoverflow.com/questions/14813530/poisson-confidence-interval-with-numpy\n Checked against my own x_poisscl.pro code in XIDL\n\n Parameters:\n -----------\n cl: float\n Confidence limit\n \"\"\"\n from scipy.stats import norm, chi2\n if sigma is not None:\n icl = norm.cdf(sigma)\n cl = 1. - 2*(1.-icl)\n #\n alpha = 1. - cl\n a = alpha\n low, high = (chi2.ppf(a/2, 2*k) / 2, chi2.ppf(1-a/2, 2*k + 2) / 2)\n if k == 0: \n low = 0.0\n return low, high\n"
] |
[
[
"scipy.stats.chi2.ppf",
"numpy.log",
"numpy.sqrt",
"scipy.stats.norm.cdf",
"numpy.min",
"numpy.arange",
"numpy.sort",
"numpy.max",
"scipy.interpolate.interp1d",
"numpy.log10",
"numpy.zeros"
]
] |
lanSeFangZhou/tokenizer_tools
|
[
"edd931ae86a6e381b57e50f8b59ae19d3151d26b"
] |
[
"tokenizer_tools/hooks.py"
] |
[
 "import pandas as pd\nimport tensorflow as tf\n\n\nclass TensorObserveHook(tf.train.SessionRunHook):\n def __init__(self, d1_mapping=None, d2_mapping=None, d2_mapper=None):\n self.d1_mapping = {} if not d1_mapping else d1_mapping\n self.d2_mapping = {} if not d2_mapping else d2_mapping\n self.d2_mapper = {} if not d2_mapper else d2_mapper\n\n def before_run(self, run_context):\n fetches = list(map(\n lambda x: run_context.session.graph.get_tensor_by_name(x),\n list(self.d1_mapping.values()) + list(self.d2_mapping.values())\n ))\n return tf.train.SessionRunArgs(\n fetches=fetches\n )\n\n def after_run(self, run_context, run_values):\n print('-- 1d level --')\n for i in range(len(self.d1_mapping)):\n print(list(self.d1_mapping.keys())[i], run_values.results[i])\n\n print('-- 2d level --')\n data = []\n for i in range(len(self.d1_mapping), len(self.d1_mapping) + len(self.d2_mapping)):\n v = run_values.results[i]\n k = list(self.d2_mapping.keys())[i - len(self.d1_mapping)]\n mapper = self.d2_mapper.get(k, lambda x: x)\n\n data.append([k, v, mapper])\n\n for index in range(len(data[0][1])):\n output_data = {}\n for k, v, mapper in data:\n decoded_v = list(map(mapper, v[index]))\n output_data[k] = decoded_v\n\n df = pd.DataFrame(output_data)\n\n print(df)\n\n\nif __name__ == \"__main__\":\n hook = TensorObserveHook(\n {\n 'fake_golden': 'fake_golden:0',\n 'fake_prediction': 'fake_prediction:0'\n },\n {\n \"word_str\": \"word_strings_Lookup:0\",\n \"predictions_id\": \"predictions:0\",\n \"predict_str\": \"predict_Lookup:0\",\n \"labels_id\": \"labels:0\",\n \"labels_str\": \"IteratorGetNext:2\",\n },\n {\n \"word_str\": lambda x: x.decode(),\n 'predict_str': lambda x: x.decode(),\n 'labels_str': lambda x: x.decode()\n }\n )\n"
] |
[
[
"tensorflow.train.SessionRunArgs",
"pandas.DataFrame"
]
] |
david8862/yolact
|
[
"dbfb1006c9b658fc01b6afc79b06d095ff64e7d7"
] |
[
"utils/functions.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport os\nimport math\nfrom collections import deque\nfrom pathlib import Path\nfrom layers.interpolate import InterpolateModule\n\nclass MovingAverage():\n \"\"\" Keeps an average window of the specified number of items. \"\"\"\n\n def __init__(self, max_window_size=1000):\n self.max_window_size = max_window_size\n self.reset()\n\n def add(self, elem):\n \"\"\" Adds an element to the window, removing the earliest element if necessary. \"\"\"\n if not math.isfinite(elem):\n print('Warning: Moving average ignored a value of %f' % elem)\n return\n\n self.window.append(elem)\n self.sum += elem\n\n if len(self.window) > self.max_window_size:\n self.sum -= self.window.popleft()\n\n def append(self, elem):\n \"\"\" Same as add just more pythonic. \"\"\"\n self.add(elem)\n\n def reset(self):\n \"\"\" Resets the MovingAverage to its initial state. \"\"\"\n self.window = deque()\n self.sum = 0\n\n def get_avg(self):\n \"\"\" Returns the average of the elements in the window. \"\"\"\n return self.sum / max(len(self.window), 1)\n\n def __str__(self):\n return str(self.get_avg())\n\n def __repr__(self):\n return repr(self.get_avg())\n\n def __len__(self):\n return len(self.window)\n\n\nclass ProgressBar():\n \"\"\" A simple progress bar that just outputs a string. \"\"\"\n\n def __init__(self, length, max_val):\n self.max_val = max_val\n self.length = length\n self.cur_val = 0\n\n self.cur_num_bars = -1\n self._update_str()\n\n def set_val(self, new_val):\n self.cur_val = new_val\n\n if self.cur_val > self.max_val:\n self.cur_val = self.max_val\n if self.cur_val < 0:\n self.cur_val = 0\n\n self._update_str()\n\n def is_finished(self):\n return self.cur_val == self.max_val\n\n def _update_str(self):\n num_bars = int(self.length * (self.cur_val / self.max_val))\n\n if num_bars != self.cur_num_bars:\n self.cur_num_bars = num_bars\n self.string = '█' * num_bars + '░' * (self.length - num_bars)\n\n def __repr__(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n\ndef init_console():\n \"\"\"\n Initialize the console to be able to use ANSI escape characters on Windows.\n \"\"\"\n if os.name == 'nt':\n from colorama import init\n init()\n\n\nclass SavePath:\n \"\"\"\n Why is this a class?\n Why do I have a class for creating and parsing save paths?\n What am I doing with my life?\n \"\"\"\n\n def __init__(self, model_name:str, epoch:int, iteration:int):\n self.model_name = model_name\n self.epoch = epoch\n self.iteration = iteration\n\n def get_path(self, root:str=''):\n file_name = self.model_name + '_' + str(self.epoch) + '_' + str(self.iteration) + '.pth'\n return os.path.join(root, file_name)\n\n @staticmethod\n def from_str(path:str):\n file_name = os.path.basename(path)\n\n if file_name.endswith('.pth'):\n file_name = file_name[:-4]\n\n params = file_name.split('_')\n\n if file_name.endswith('interrupt'):\n params = params[:-1]\n\n model_name = '_'.join(params[:-2])\n epoch = params[-2]\n iteration = params[-1]\n\n return SavePath(model_name, int(epoch), int(iteration))\n\n @staticmethod\n def remove_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'):\n p.unlink()\n\n @staticmethod\n def get_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'): \n return str(p)\n return None\n\n @staticmethod\n def get_latest(save_folder, config):\n \"\"\" Note: config should be config.name. 
\"\"\"\n max_iter = -1\n max_name = None\n\n for p in Path(save_folder).glob(config + '_*'):\n path_name = str(p)\n\n try:\n save = SavePath.from_str(path_name)\n except:\n continue\n\n if save.model_name == config and save.iteration > max_iter:\n max_iter = save.iteration\n max_name = path_name\n\n return max_name\n\ndef make_net(in_channels, conf, include_last_relu=True):\n \"\"\"\n A helper function to take a config setting and turn it into a network.\n Used by protonet and extrahead. Returns (network, out_channels)\n \"\"\"\n def make_layer(layer_cfg):\n nonlocal in_channels\n\n # Possible patterns:\n # ( 256, 3, {}) -> conv\n # ( 256,-2, {}) -> deconv\n # (None,-2, {}) -> bilinear interpolate\n # ('cat',[],{}) -> concat the subnetworks in the list\n #\n # You know it would have probably been simpler just to adopt a 'c' 'd' 'u' naming scheme.\n # Whatever, it's too late now.\n if isinstance(layer_cfg[0], str):\n layer_name = layer_cfg[0]\n\n if layer_name == 'cat':\n nets = [make_net(in_channels, x) for x in layer_cfg[1]]\n layer = Concat([net[0] for net in nets], layer_cfg[2])\n num_channels = sum([net[1] for net in nets])\n else:\n num_channels = layer_cfg[0]\n kernel_size = layer_cfg[1]\n\n if kernel_size > 0:\n layer = nn.Conv2d(in_channels, num_channels, kernel_size, **layer_cfg[2])\n else:\n if num_channels is None:\n layer = InterpolateModule(scale_factor=-kernel_size, mode='bilinear', align_corners=False, **layer_cfg[2])\n else:\n layer = nn.ConvTranspose2d(in_channels, num_channels, -kernel_size, **layer_cfg[2])\n\n in_channels = num_channels if num_channels is not None else in_channels\n\n # Don't return a ReLU layer if we're doing an upsample. This probably doesn't affect anything\n # output-wise, but there's no need to go through a ReLU here.\n # Commented out for backwards compatibility with previous models\n # if num_channels is None:\n # return [layer]\n # else:\n return [layer, nn.ReLU(inplace=True)]\n\n # Use sum to concat together all the component layer lists\n net = sum([make_layer(x) for x in conf], [])\n if not include_last_relu:\n net = net[:-1]\n\n return nn.Sequential(*(net)), in_channels\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.ConvTranspose2d"
]
] |
yatbear/cv
|
[
"abff7fd64f141eef7c222c16885c8200b7dd18d0"
] |
[
"recognizer/p8.py"
] |
[
"#!usr/bin/env python\n\n# EN.600.661 HW #1 \n#\n# Usage: python [files]\n#\n# Detect end-points of line segments\n#\n# Author: yatbear <[email protected]>\n# 2015-09-21\n\nfrom __future__ import division\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef p8(image_in, hough_image_in, edge_thresholded_in, hough_thresh): \n # Darken the original edges \n edge_inds = np.where(edge_thresholded_in > 0)\n edge_thresholded_in[edge_inds] = 20\n\n (m, n) = edge_thresholded_in.shape\n line_image_out = edge_thresholded_in.copy() \n\n # Thresholding\n ind = np.where(hough_image_in >= hough_thresh)\n param_inds = zip(ind[0], ind[1])\n\n # Parameter space\n theta_range = np.deg2rad(np.arange(-90.0, 90.0, 1.0))\n diag = np.ceil(np.sqrt((m-1)**2 + (n-1)**2))\n rho_range = np.linspace(-diag, diag, diag*2.0)\n \n for (t, r) in param_inds:\n theta = theta_range[t]\n rho = rho_range[r]\n sin = np.sin(theta)\n cos = np.cos(theta) \n\n # Find interceptions with image boundaries\n (x1, y1) = (rho / cos, 0) \\\n if cos != 0 else (np.nan, 0)\n (x2, y2) = (0, -rho / sin) \\\n if sin != 0 else (0, np.nan)\n (x3, y3) = (((m-1)*sin + rho) / cos, m - 1) \\\n if cos != 0 else (np.nan, m - 1)\n (x4, y4) = (n - 1, ((n-1)*cos - rho) / sin) \\\n if sin != 0 else (n - 1, np.nan)\n\n # Find end points for each line\n endpts = []\n pts = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]\n for (x, y) in pts:\n if x >= 0 and x <= n - 1:\n if y >= 0 and y <= m - 1:\n endpts.append((int(x), int(y)))\n\n if len(endpts) != 2:\n # print len(endpts)\n continue\n\n color = 60 \n cv2.line(line_image_out, endpts[0], endpts[1], color)\n\n cropped_lines_image_out = edge_thresholded_in.copy()\n\n for y in xrange(n):\n for x in xrange(m):\n if line_image_out[x][y] > 0: \n if edge_thresholded_in[x][y] > 0:\n cropped_lines_image_out[x][y] = line_image_out[x][y]\n\n plt.axis(\"off\")\n plt.imshow(cropped_lines_image_out)\n plt.show()\n\n return cropped_lines_image_out\n\n# from p6 import *\n# from p5 import *\n\n# path = \"pgm/hough_simple_1.pgm\"\n# image_in = cv2.imread(path, 0)\n# edge_image_thresh_out = p5(image_in)\n# [edge_image_thresh_out, hough_out] = p6(edge_image_thresh_out, 40)\n# p8(image_in, hough_out, edge_image_thresh_out, 164)"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.linspace",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.where"
]
] |
diegotg2000/DiegoTorres_Ejercicio20
|
[
"9b95b23788740c7703267bc115297338e25a7962"
] |
[
"complejo.py"
] |
[
"import numpy as np\r\n\r\nclass Complejo:\r\n def __init__(self,x,y):\r\n self.real=x\r\n self.imaginario=y\r\n self.norma=np.sqrt(x**2+y**2)\r\n def conjugado(self):\r\n self.imaginario=-self.imaginario\r\n def calcula_norma(self):\r\n return self.norma\r\n def multiplicar(self, otro):\r\n a=self.real\r\n b=self.imaginario\r\n c=otro.real\r\n d=otro.imaginario\r\n return Complejo(a*c-b*d,b*c+a*d)\r\n def pow(self,n):\r\n p=self\r\n for i in range(n-1):\r\n p=p.multiplicar(self)\r\n return p\r\n def imprimir(self):\r\n x=self.real\r\n y=self.imaginario\r\n return(x,y)\r\n\r\n "
] |
[
[
"numpy.sqrt"
]
] |
TimothyChen225/AFC-X
|
[
"901a0019b7c153804570c480c3da4825776dbf02"
] |
[
"feature.py"
] |
[
"from collections import Counter\r\n\r\nfrom Bio import SeqIO\r\n\r\nimport numpy as np\r\n\r\nimport warnings\r\nimport math\r\n\r\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\r\nfrom gensim.models import Word2Vec\r\n\r\nMax_length = 100 # maximum length of used peptides\r\n\r\ndef check_length(file):\r\n length = []\r\n global Max_length\r\n with open(file) as f:\r\n for i in f:\r\n if i[0] != \">\":\r\n length.append(len(i))\r\n temp_max = max(length)\r\n if temp_max > Max_length:\r\n Max_length = temp_max\r\n\r\n\r\ndef add(x, i):\r\n x_copy = x.copy()\r\n x_copy[i] = 1\r\n return x_copy\r\n\r\n\r\n\r\n\r\ndef BLOSUM62(seq):\r\n blosum62 = {\r\n 'A': [4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0], # A\r\n 'R': [-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3], # R\r\n 'N': [-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3], # N\r\n 'D': [-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3], # D\r\n 'C': [0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1], # C\r\n 'Q': [-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2], # Q\r\n 'E': [-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2], # E\r\n 'G': [0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3], # G\r\n 'H': [-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3], # H\r\n 'I': [-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3], # I\r\n 'L': [-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1], # L\r\n 'K': [-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2], # K\r\n 'M': [-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1], # M\r\n 'F': [-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1], # F\r\n 'P': [-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2], # P\r\n 'S': [1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2], # S\r\n 'T': [0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0], # T\r\n 'W': [-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3], # W\r\n 'Y': [-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1], # Y\r\n 'V': [0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4], # V\r\n '-': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # -\r\n }\r\n\r\n pad_len = Max_length - len(seq)\r\n seqs = []\r\n\r\n for aa in seq:\r\n seqs.append(blosum62[aa])\r\n for _ in range(pad_len):\r\n seqs.append(blosum62['-'])\r\n\r\n return seqs\r\n\r\n\r\ndef Count(aaSet, sequence):\r\n number = 0\r\n for aa in sequence:\r\n if aa in aaSet:\r\n number = number + 1\r\n cutoffNums = [1, math.floor(0.25 * number), math.floor(0.50 * number), math.floor(0.75 * number), number]\r\n cutoffNums = [i if i >= 1 else 1 for i in cutoffNums]\r\n\r\n code = []\r\n for cutoff in cutoffNums:\r\n myCount = 0\r\n for i in range(len(sequence)):\r\n if sequence[i] in aaSet:\r\n myCount += 1\r\n if myCount == cutoff:\r\n code.append((i + 1) / len(sequence) * Max_length)\r\n break\r\n if myCount == 0:\r\n code.append(0)\r\n return code\r\n\r\n\r\ndef CTDD(seq):\r\n group1 = {\r\n 'hydrophobicity_PRAM900101': 'RKEDQN',\r\n 'hydrophobicity_ARGP820101': 'QSTNGDE',\r\n 'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',\r\n 'hydrophobicity_PONP930101': 'KPDESNQT',\r\n 
'hydrophobicity_CASG920101': 'KDEQPSRNTG',\r\n 'hydrophobicity_ENGD860101': 'RDKENQHYP',\r\n 'hydrophobicity_FASG890101': 'KERSQD',\r\n 'normwaalsvolume': 'GASTPDC',\r\n 'polarity': 'LIFWCMVY',\r\n 'polarizability': 'GASDT',\r\n 'charge': 'KR',\r\n 'secondarystruct': 'EALMQKRH',\r\n 'solventaccess': 'ALFCGIVW'\r\n }\r\n group2 = {\r\n 'hydrophobicity_PRAM900101': 'GASTPHY',\r\n 'hydrophobicity_ARGP820101': 'RAHCKMV',\r\n 'hydrophobicity_ZIMJ680101': 'HMCKV',\r\n 'hydrophobicity_PONP930101': 'GRHA',\r\n 'hydrophobicity_CASG920101': 'AHYMLV',\r\n 'hydrophobicity_ENGD860101': 'SGTAW',\r\n 'hydrophobicity_FASG890101': 'NTPG',\r\n 'normwaalsvolume': 'NVEQIL',\r\n 'polarity': 'PATGS',\r\n 'polarizability': 'CPNVEQIL',\r\n 'charge': 'ANCQGHILMFPSTWYV',\r\n 'secondarystruct': 'VIYCWFT',\r\n 'solventaccess': 'RKQEND'\r\n }\r\n group3 = {\r\n 'hydrophobicity_PRAM900101': 'CLVIMFW',\r\n 'hydrophobicity_ARGP820101': 'LYPFIW',\r\n 'hydrophobicity_ZIMJ680101': 'LPFYI',\r\n 'hydrophobicity_PONP930101': 'YMFWLCVI',\r\n 'hydrophobicity_CASG920101': 'FIWC',\r\n 'hydrophobicity_ENGD860101': 'CVLIMF',\r\n 'hydrophobicity_FASG890101': 'AYHWVMFLIC',\r\n 'normwaalsvolume': 'MHKFRYW',\r\n 'polarity': 'HQRKNED',\r\n 'polarizability': 'KMHFRYW',\r\n 'charge': 'DE',\r\n 'secondarystruct': 'GNPSD',\r\n 'solventaccess': 'MSPTHY'\r\n }\r\n\r\n groups = [group1, group2, group3]\r\n property = (\r\n 'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101',\r\n 'hydrophobicity_PONP930101',\r\n 'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',\r\n 'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess')\r\n\r\n encodings = []\r\n\r\n code = []\r\n for p in property:\r\n code = code + Count(group1[p], seq) + Count(group2[p], seq) + Count(group3[p], seq)\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef DPC(seq):\r\n AA = 'ACDEFGHIKLMNPQRSTVWY'\r\n encodings = []\r\n diPeptides = [aa1 + aa2 for aa1 in AA for aa2 in AA]\r\n # header = ['#'] + diPeptides\r\n # encodings.append(header)\r\n\r\n AADict = {}\r\n for i in range(len(AA)):\r\n AADict[AA[i]] = i\r\n\r\n # for i in fastas:\r\n # name, sequence = i[0], re.sub('-', '', i[1])\r\n code = []\r\n tmpCode = [0] * 400\r\n for j in range(len(seq) - 2 + 1):\r\n tmpCode[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] = tmpCode[AADict[seq[j]] * 20 + AADict[\r\n seq[j + 1]]] + 1\r\n if sum(tmpCode) != 0:\r\n tmpCode = [i / sum(tmpCode) for i in tmpCode]\r\n code = code + tmpCode\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef AAC(seq):\r\n AA = 'ACDEFGHIKLMNPQRSTVWY'\r\n # AA = 'ARNDCQEGHILKMFPSTWYV'\r\n encodings = []\r\n\r\n # for i in fastas:\r\n # name, sequence = i[0], re.sub('-', '', i[1])\r\n count = Counter(seq)\r\n for key in count:\r\n count[key] = count[key] / len(seq)\r\n code = []\r\n for aa in AA:\r\n code.append(count[aa])\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef ZSCALE(seq):\r\n zscale = {\r\n 'A': [0.24, -2.32, 0.60, -0.14, 1.30], # A\r\n 'C': [0.84, -1.67, 3.71, 0.18, -2.65], # C\r\n 'D': [3.98, 0.93, 1.93, -2.46, 0.75], # D\r\n 'E': [3.11, 0.26, -0.11, -0.34, -0.25], # E\r\n 'F': [-4.22, 1.94, 1.06, 0.54, -0.62], # F\r\n 'G': [2.05, -4.06, 0.36, -0.82, -0.38], # G\r\n 'H': [2.47, 1.95, 0.26, 3.90, 0.09], # H\r\n 'I': [-3.89, -1.73, -1.71, -0.84, 0.26], # I\r\n 'K': [2.29, 0.89, -2.49, 1.49, 0.31], # K\r\n 'L': [-4.28, -1.30, -1.49, -0.72, 0.84], # L\r\n 'M': [-2.85, -0.22, 0.47, 1.94, -0.98], # M\r\n 'N': [3.05, 1.62, 
1.04, -1.15, 1.61], # N\r\n 'P': [-1.66, 0.27, 1.84, 0.70, 2.00], # P\r\n 'Q': [1.75, 0.50, -1.44, -1.34, 0.66], # Q\r\n 'R': [3.52, 2.50, -3.50, 1.99, -0.17], # R\r\n 'S': [2.39, -1.07, 1.15, -1.39, 0.67], # S\r\n 'T': [0.75, -2.18, -1.12, -1.46, -0.40], # T\r\n 'V': [-2.59, -2.64, -1.54, -0.85, -0.02], # V\r\n 'W': [-4.36, 3.94, 0.59, 3.44, -1.59], # W\r\n 'Y': [-2.54, 2.44, 0.43, 0.04, -1.47], # Y\r\n '-': [0.00, 0.00, 0.00, 0.00, 0.00], # -\r\n }\r\n encodings = []\r\n # header = ['#']\r\n # for p in range(1, len(fastas[0][1]) + 1):\r\n # for z in ('1', '2', '3', '4', '5'):\r\n # header.append('Pos' + str(p) + '.ZSCALE' + z)\r\n # encodings.append(header)\r\n\r\n # for i in fastas:\r\n # name, sequence = i[0], i[1]\r\n code = []\r\n\r\n for _ in range(Max_length - len(seq)):\r\n code = code + zscale['-']\r\n\r\n for aa in seq:\r\n code = code + zscale[aa]\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef TPC(seq):\r\n AA = 'ACDEFGHIKLMNPQRSTVWY'\r\n encodings = []\r\n triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA]\r\n\r\n AADict = {}\r\n for i in range(len(AA)):\r\n AADict[AA[i]] = i\r\n\r\n # for i in fastas:\r\n # name, sequence = i[0], re.sub('-', '', i[1])\r\n code = []\r\n tmpCode = [0] * 8000\r\n for j in range(len(seq) - 3 + 1):\r\n tmpCode[AADict[seq[j]] * 400 + AADict[seq[j + 1]] * 20 + AADict[seq[j + 2]]] = tmpCode[AADict[seq[j]] * 400 +\r\n AADict[seq[j + 1]] * 20 +\r\n AADict[seq[j + 2]]] + 1\r\n if sum(tmpCode) != 0:\r\n tmpCode = [i / sum(tmpCode) for i in tmpCode]\r\n code = code + tmpCode\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef DDE(seq):\r\n AA = 'ACDEFGHIKLMNPQRSTVWY'\r\n\r\n myCodons = {\r\n 'A': 4,\r\n 'C': 2,\r\n 'D': 2,\r\n 'E': 2,\r\n 'F': 2,\r\n 'G': 4,\r\n 'H': 2,\r\n 'I': 3,\r\n 'K': 2,\r\n 'L': 6,\r\n 'M': 1,\r\n 'N': 2,\r\n 'P': 4,\r\n 'Q': 2,\r\n 'R': 6,\r\n 'S': 6,\r\n 'T': 4,\r\n 'V': 4,\r\n 'W': 1,\r\n 'Y': 2\r\n }\r\n\r\n encodings = []\r\n diPeptides = [aa1 + aa2 for aa1 in AA for aa2 in AA]\r\n\r\n myTM = []\r\n for pair in diPeptides:\r\n myTM.append((myCodons[pair[0]] / 61) * (myCodons[pair[1]] / 61))\r\n\r\n AADict = {}\r\n for i in range(len(AA)):\r\n AADict[AA[i]] = i\r\n\r\n # for i in fastas:\r\n # name, sequence = i[0], re.sub('-', '', i[1])\r\n code = []\r\n tmpCode = [0] * 400\r\n for j in range(len(seq) - 2 + 1):\r\n tmpCode[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] = tmpCode[AADict[seq[j]] * 20 + AADict[\r\n seq[j + 1]]] + 1\r\n if sum(tmpCode) != 0:\r\n tmpCode = [i / sum(tmpCode) for i in tmpCode]\r\n\r\n myTV = []\r\n for j in range(len(myTM)):\r\n myTV.append(myTM[j] * (1 - myTM[j]) / (len(seq) - 1))\r\n\r\n for j in range(len(tmpCode)):\r\n tmpCode[j] = (tmpCode[j] - myTM[j]) / math.sqrt(myTV[j])\r\n\r\n code = code + tmpCode\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef CalculateKSCTriad(sequence, gap, features, AADict):\r\n res = []\r\n for g in range(gap + 1):\r\n myDict = {}\r\n for f in features:\r\n myDict[f] = 0\r\n\r\n for i in range(len(sequence)):\r\n if i + gap + 1 < len(sequence) and i + 2 * gap + 2 < len(sequence):\r\n fea = AADict[sequence[i]] + '.' + AADict[sequence[i + gap + 1]] + '.' 
+ AADict[\r\n sequence[i + 2 * gap + 2]]\r\n myDict[fea] = myDict[fea] + 1\r\n\r\n maxValue, minValue = max(myDict.values()), min(myDict.values())\r\n for f in features:\r\n res.append((myDict[f] - minValue) / maxValue)\r\n\r\n return res\r\n\r\n\r\ndef CTriad(seq):\r\n AAGroup = {\r\n 'g1': 'AGV',\r\n 'g2': 'ILFP',\r\n 'g3': 'YMTS',\r\n 'g4': 'HNQW',\r\n 'g5': 'RK',\r\n 'g6': 'DE',\r\n 'g7': 'C'\r\n }\r\n\r\n myGroups = sorted(AAGroup.keys())\r\n\r\n AADict = {}\r\n for g in myGroups:\r\n for aa in AAGroup[g]:\r\n AADict[aa] = g\r\n\r\n features = [f1 + '.' + f2 + '.' + f3 for f1 in myGroups for f2 in myGroups for f3 in myGroups]\r\n\r\n encodings = []\r\n # header = ['#']\r\n # for f in features:\r\n # header.append(f)\r\n # encodings.append(header)\r\n\r\n # me, sequence = i[0], re.sub('-', '', i[1])\r\n code = []\r\n if len(seq) < 3:\r\n print('Error: for \"CTriad\" encoding, the input fasta sequences should be greater than 3. \\n\\n')\r\n return 0\r\n code = code + CalculateKSCTriad(seq, 0, features, AADict)\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef CalculateKSCTriad(sequence, gap, features, AADict):\r\n res = []\r\n for g in range(gap + 1):\r\n myDict = {}\r\n for f in features:\r\n myDict[f] = 0\r\n\r\n for i in range(len(sequence)):\r\n if i + g + 1 < len(sequence) and i + 2 * g + 2 < len(sequence):\r\n fea = AADict[sequence[i]] + '.' + AADict[sequence[i + g + 1]] + '.' + AADict[sequence[i + 2 * g + 2]]\r\n myDict[fea] = myDict[fea] + 1\r\n\r\n maxValue, minValue = max(myDict.values()), min(myDict.values())\r\n for f in features:\r\n res.append((myDict[f] - minValue) / maxValue)\r\n\r\n return res\r\n\r\n\r\ndef KSCTriad(seq, gap=1):\r\n AAGroup = {\r\n 'g1': 'AGV',\r\n 'g2': 'ILFP',\r\n 'g3': 'YMTS',\r\n 'g4': 'HNQW',\r\n 'g5': 'RK',\r\n 'g6': 'DE',\r\n 'g7': 'C'\r\n }\r\n\r\n myGroups = sorted(AAGroup.keys())\r\n\r\n AADict = {}\r\n for g in myGroups:\r\n for aa in AAGroup[g]:\r\n AADict[aa] = g\r\n\r\n features = [f1 + '.' + f2 + '.' + f3 for f1 in myGroups for f2 in myGroups for f3 in myGroups]\r\n\r\n encodings = []\r\n\r\n code = []\r\n if len(seq) < 2 * gap + 3:\r\n print('Error: for \"KSCTriad\" encoding, the input fasta sequences should be greater than (2*gap+3). \\n\\n')\r\n return 0\r\n code = code + CalculateKSCTriad(seq, gap, features, AADict)\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef GTPC(seq):\r\n group = {\r\n 'alphaticr': 'GAVLMI',\r\n 'aromatic': 'FYW',\r\n 'postivecharger': 'KRH',\r\n 'negativecharger': 'DE',\r\n 'uncharger': 'STCPNQ'\r\n }\r\n\r\n groupKey = group.keys()\r\n baseNum = len(groupKey)\r\n triple = [g1 + '.' + g2 + '.' + g3 for g1 in groupKey for g2 in groupKey for g3 in groupKey]\r\n\r\n index = {}\r\n for key in groupKey:\r\n for aa in group[key]:\r\n index[aa] = key\r\n\r\n encodings = []\r\n\r\n code = []\r\n myDict = {}\r\n for t in triple:\r\n myDict[t] = 0\r\n\r\n sum = 0\r\n for j in range(len(seq) - 3 + 1):\r\n myDict[index[seq[j]] + '.' + index[seq[j + 1]] + '.' + index[seq[j + 2]]] = myDict[index[seq[j]] + '.' + index[\r\n seq[j + 1]] + '.' + index[seq[j + 2]]] + 1\r\n sum = sum + 1\r\n\r\n if sum == 0:\r\n for t in triple:\r\n code.append(0)\r\n else:\r\n for t in triple:\r\n code.append(myDict[t] / sum)\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef generateGroupPairs(groupKey):\r\n gPair = {}\r\n for key1 in groupKey:\r\n for key2 in groupKey:\r\n gPair[key1 + '.' 
+ key2] = 0\r\n return gPair\r\n\r\n\r\ndef CKSAAGP(seq, gap=2):\r\n if gap < 0:\r\n print('Error: the gap should be equal or greater than zero' + '\\n\\n')\r\n return 0\r\n\r\n group = {\r\n 'alphaticr': 'GAVLMI',\r\n 'aromatic': 'FYW',\r\n 'postivecharger': 'KRH',\r\n 'negativecharger': 'DE',\r\n 'uncharger': 'STCPNQ'\r\n }\r\n\r\n AA = 'ARNDCQEGHILKMFPSTWYV'\r\n\r\n groupKey = group.keys()\r\n\r\n index = {}\r\n for key in groupKey:\r\n for aa in group[key]:\r\n index[aa] = key\r\n\r\n gPairIndex = []\r\n for key1 in groupKey:\r\n for key2 in groupKey:\r\n gPairIndex.append(key1 + '.' + key2)\r\n\r\n encodings = []\r\n\r\n code = []\r\n for g in range(gap + 1):\r\n gPair = generateGroupPairs(groupKey)\r\n sum = 0\r\n for p1 in range(len(seq)):\r\n p2 = p1 + g + 1\r\n if p2 < len(seq) and seq[p1] in AA and seq[p2] in AA:\r\n gPair[index[seq[p1]] + '.' + index[seq[p2]]] = gPair[index[seq[p1]] + '.' + index[\r\n seq[p2]]] + 1\r\n sum = sum + 1\r\n\r\n if sum == 0:\r\n for gp in gPairIndex:\r\n code.append(0)\r\n else:\r\n for gp in gPairIndex:\r\n code.append(gPair[gp] / sum)\r\n\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef GAAC(seq):\r\n group = {\r\n 'alphatic': 'GAVLMI',\r\n 'aromatic': 'FYW',\r\n 'postivecharge': 'KRH',\r\n 'negativecharge': 'DE',\r\n 'uncharge': 'STCPNQ'\r\n }\r\n\r\n groupKey = group.keys()\r\n\r\n encodings = []\r\n code = []\r\n count = Counter(seq)\r\n myDict = {}\r\n for key in groupKey:\r\n for aa in group[key]:\r\n myDict[key] = myDict.get(key, 0) + count[aa]\r\n\r\n for key in groupKey:\r\n code.append(myDict[key] / len(seq))\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef GDPC(seq):\r\n group = {\r\n 'alphaticr': 'GAVLMI',\r\n 'aromatic': 'FYW',\r\n 'postivecharger': 'KRH',\r\n 'negativecharger': 'DE',\r\n 'uncharger': 'STCPNQ'\r\n }\r\n\r\n groupKey = group.keys()\r\n baseNum = len(groupKey)\r\n dipeptide = [g1 + '.' + g2 for g1 in groupKey for g2 in groupKey]\r\n\r\n index = {}\r\n for key in groupKey:\r\n for aa in group[key]:\r\n index[aa] = key\r\n\r\n encodings = []\r\n\r\n code = []\r\n myDict = {}\r\n for t in dipeptide:\r\n myDict[t] = 0\r\n\r\n sum = 0\r\n for j in range(len(seq) - 2 + 1):\r\n myDict[index[seq[j]] + '.' + index[seq[j + 1]]] = myDict[index[seq[j]] + '.' 
+ index[\r\n seq[j + 1]]] + 1\r\n sum = sum + 1\r\n\r\n if sum == 0:\r\n for t in dipeptide:\r\n code.append(0)\r\n else:\r\n for t in dipeptide:\r\n code.append(myDict[t] / sum)\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef AAINDEX(seq):\r\n temp = \"-\" * (Max_length - len(seq))\r\n seq += temp\r\n\r\n AA = 'ARNDCQEGHILKMFPSTWYV'\r\n\r\n fileAAindex = \"data\\\\AAindex1.txt\"\r\n with open(fileAAindex) as f:\r\n records = f.readlines()[1:]\r\n\r\n AAindex = []\r\n AAindexName = []\r\n for i in records:\r\n AAindex.append(i.rstrip().split()[1:] if i.rstrip() != '' else None)\r\n AAindexName.append(i.rstrip().split()[0] if i.rstrip() != '' else None)\r\n\r\n index = {}\r\n for i in range(len(AA)):\r\n index[AA[i]] = i\r\n\r\n encodings = []\r\n\r\n code = []\r\n\r\n for aa in seq:\r\n if aa == '-':\r\n for j in AAindex:\r\n code.append(0)\r\n\r\n continue\r\n for j in AAindex:\r\n code.append(j[index[aa]])\r\n\r\n encodings.append(code)\r\n\r\n return encodings\r\n\r\n\r\ndef CTDT(seq):\r\n group1 = {\r\n 'hydrophobicity_PRAM900101': 'RKEDQN',\r\n 'hydrophobicity_ARGP820101': 'QSTNGDE',\r\n 'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',\r\n 'hydrophobicity_PONP930101': 'KPDESNQT',\r\n 'hydrophobicity_CASG920101': 'KDEQPSRNTG',\r\n 'hydrophobicity_ENGD860101': 'RDKENQHYP',\r\n 'hydrophobicity_FASG890101': 'KERSQD',\r\n 'normwaalsvolume': 'GASTPDC',\r\n 'polarity': 'LIFWCMVY',\r\n 'polarizability': 'GASDT',\r\n 'charge': 'KR',\r\n 'secondarystruct': 'EALMQKRH',\r\n 'solventaccess': 'ALFCGIVW'\r\n }\r\n group2 = {\r\n 'hydrophobicity_PRAM900101': 'GASTPHY',\r\n 'hydrophobicity_ARGP820101': 'RAHCKMV',\r\n 'hydrophobicity_ZIMJ680101': 'HMCKV',\r\n 'hydrophobicity_PONP930101': 'GRHA',\r\n 'hydrophobicity_CASG920101': 'AHYMLV',\r\n 'hydrophobicity_ENGD860101': 'SGTAW',\r\n 'hydrophobicity_FASG890101': 'NTPG',\r\n 'normwaalsvolume': 'NVEQIL',\r\n 'polarity': 'PATGS',\r\n 'polarizability': 'CPNVEQIL',\r\n 'charge': 'ANCQGHILMFPSTWYV',\r\n 'secondarystruct': 'VIYCWFT',\r\n 'solventaccess': 'RKQEND'\r\n }\r\n group3 = {\r\n 'hydrophobicity_PRAM900101': 'CLVIMFW',\r\n 'hydrophobicity_ARGP820101': 'LYPFIW',\r\n 'hydrophobicity_ZIMJ680101': 'LPFYI',\r\n 'hydrophobicity_PONP930101': 'YMFWLCVI',\r\n 'hydrophobicity_CASG920101': 'FIWC',\r\n 'hydrophobicity_ENGD860101': 'CVLIMF',\r\n 'hydrophobicity_FASG890101': 'AYHWVMFLIC',\r\n 'normwaalsvolume': 'MHKFRYW',\r\n 'polarity': 'HQRKNED',\r\n 'polarizability': 'KMHFRYW',\r\n 'charge': 'DE',\r\n 'secondarystruct': 'GNPSD',\r\n 'solventaccess': 'MSPTHY'\r\n }\r\n\r\n groups = [group1, group2, group3]\r\n property = (\r\n 'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101',\r\n 'hydrophobicity_PONP930101',\r\n 'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',\r\n 'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess')\r\n\r\n encodings = []\r\n\r\n code = []\r\n aaPair = [seq[j:j + 2] for j in range(len(seq) - 1)]\r\n for p in property:\r\n c1221, c1331, c2332 = 0, 0, 0\r\n for pair in aaPair:\r\n if (pair[0] in group1[p] and pair[1] in group2[p]) or (pair[0] in group2[p] and pair[1] in group1[p]):\r\n c1221 = c1221 + 1\r\n continue\r\n if (pair[0] in group1[p] and pair[1] in group3[p]) or (pair[0] in group3[p] and pair[1] in group1[p]):\r\n c1331 = c1331 + 1\r\n continue\r\n if (pair[0] in group2[p] and pair[1] in group3[p]) or (pair[0] in group3[p] and pair[1] in group2[p]):\r\n c2332 = c2332 + 1\r\n code = code + [c1221 / len(aaPair), 
c1331 / len(aaPair), c2332 / len(aaPair)]\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef Geary(seq, props=['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',\r\n 'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201'],\r\n nlag=2):\r\n AA = 'ARNDCQEGHILKMFPSTWYV'\r\n fileAAidx = \"data\\\\AAidx.txt\"\r\n with open(fileAAidx) as f:\r\n records = f.readlines()[1:]\r\n myDict = {}\r\n for i in records:\r\n array = i.rstrip().split('\\t')\r\n myDict[array[0]] = array[1:]\r\n\r\n AAidx = []\r\n AAidxName = []\r\n for i in props:\r\n if i in myDict:\r\n AAidx.append(myDict[i])\r\n AAidxName.append(i)\r\n else:\r\n print('\"' + i + '\" properties not exist.')\r\n return None\r\n\r\n AAidx1 = np.array([float(j) for i in AAidx for j in i])\r\n AAidx = AAidx1.reshape((len(AAidx), 20))\r\n\r\n propMean = np.mean(AAidx, axis=1)\r\n propStd = np.std(AAidx, axis=1)\r\n\r\n for i in range(len(AAidx)):\r\n for j in range(len(AAidx[i])):\r\n AAidx[i][j] = (AAidx[i][j] - propMean[i]) / propStd[i]\r\n\r\n index = {}\r\n for i in range(len(AA)):\r\n index[AA[i]] = i\r\n\r\n encodings = []\r\n\r\n code = []\r\n N = len(seq)\r\n for prop in range(len(props)):\r\n xmean = sum([AAidx[prop][index[aa]] for aa in seq]) / N\r\n for n in range(1, nlag + 1):\r\n if len(seq) > nlag:\r\n # if key is '-', then the value is 0\r\n rn = (N - 1) / (2 * (N - n)) * ((sum(\r\n [(AAidx[prop][index.get(seq[j], 0)] - AAidx[prop][index.get(seq[j + n], 0)]) ** 2 for\r\n j in range(len(seq) - n)])) / (sum(\r\n [(AAidx[prop][index.get(seq[j], 0)] - xmean) ** 2 for j in range(len(seq))])))\r\n else:\r\n rn = 'NA'\r\n code.append(rn)\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef CKSAAP(seq, gap=2, **kw):\r\n\r\n if gap < 0:\r\n print('Error: the gap should be equal or greater than zero' + '\\n\\n')\r\n return 0\r\n\r\n AA = 'ACDEFGHIKLMNPQRSTVWY'\r\n encodings = []\r\n aaPairs = []\r\n for aa1 in AA:\r\n for aa2 in AA:\r\n aaPairs.append(aa1 + aa2)\r\n\r\n code = []\r\n for g in range(gap + 1):\r\n myDict = {}\r\n for pair in aaPairs:\r\n myDict[pair] = 0\r\n sum = 0\r\n for index1 in range(len(seq)):\r\n index2 = index1 + g + 1\r\n if index1 < len(seq) and index2 < len(seq) and seq[index1] in AA and seq[\r\n index2] in AA:\r\n myDict[seq[index1] + seq[index2]] = myDict[seq[index1] + seq[index2]] + 1\r\n sum = sum + 1\r\n for pair in aaPairs:\r\n code.append(myDict[pair] / sum)\r\n encodings.append(code)\r\n return encodings\r\n\r\n\r\ndef Rvalue(aa1, aa2, AADict, Matrix):\r\n return sum([(Matrix[i][AADict[aa1]] - Matrix[i][AADict[aa2]]) ** 2 for i in range(len(Matrix))]) / len(Matrix)\r\n\r\n\r\ndef PAAC(seq, lambdaValue=3, w=0.05):\r\n\r\n\r\n dataFile = 'data\\PAAC.txt'\r\n with open(dataFile) as f:\r\n records = f.readlines()\r\n AA = ''.join(records[0].rstrip().split()[1:])\r\n AADict = {}\r\n for i in range(len(AA)):\r\n AADict[AA[i]] = i\r\n AAProperty = []\r\n AAPropertyNames = []\r\n for i in range(1, len(records)):\r\n array = records[i].rstrip().split() if records[i].rstrip() != '' else None\r\n AAProperty.append([float(j) for j in array[1:]])\r\n AAPropertyNames.append(array[0])\r\n\r\n AAProperty1 = []\r\n for i in AAProperty:\r\n meanI = sum(i) / 20\r\n fenmu = math.sqrt(sum([(j - meanI) ** 2 for j in i]) / 20)\r\n AAProperty1.append([(j - meanI) / fenmu for j in i])\r\n\r\n encodings = []\r\n\r\n\r\n\r\n code = []\r\n theta = []\r\n for n in range(1, lambdaValue + 1):\r\n theta.append(\r\n sum([Rvalue(seq[j], seq[j + n], AADict, AAProperty1) for j in range(len(seq) - n)]) / (\r\n 
len(seq) - n))\r\n myDict = {}\r\n for aa in AA:\r\n myDict[aa] = seq.count(aa)\r\n code = code + [myDict[aa] / (1 + w * sum(theta)) for aa in AA]\r\n code = code + [(w * j) / (1 + w * sum(theta)) for j in theta]\r\n encodings.append(code)\r\n return encodings\r\n\r\n# AFC-T, AFC-CP\r\n\r\ndef Feature(f):\r\n amino_acids = \"XACDEFGHIKLMNPQRSTVWY\"\r\n amino_acids_dict = {}\r\n seqs = []\r\n seqs_blosum62 = []\r\n seqs_dde = []\r\n seqs_z = []\r\n seqs_dpc = []\r\n seqs_aac = []\r\n seqs_ctdd = []\r\n lable_seqs = []\r\n work2vec = []\r\n seqs_sr = []\r\n seqs_ksctriad = []\r\n seqs_gtpc = []\r\n seqs_cksaagp = []\r\n seqs_gaac = []\r\n seqs_gdpc = []\r\n seqs_aaindex = []\r\n seqs_ctdt = []\r\n seqs_geary = []\r\n seqs_cksaap = []\r\n seqs_ctrial = []\r\n seqs_paac = []\r\n for n, s in enumerate(amino_acids):\r\n amino_acids_dict[s] = n\r\n #new_antifu = Word2Vec.load('fa_model_All.bin')\r\n\r\n for n, s in enumerate(SeqIO.parse(f, \"fasta\")):\r\n seq_blosum62 = BLOSUM62(s.seq)\r\n seq_ksctriad = KSCTriad(s.seq)\r\n seq_dde = DDE(s.seq)\r\n seq_z = ZSCALE(s.seq)\r\n seq_aac = AAC(s.seq)\r\n seq_dpc = DPC(s.seq)\r\n seq_ctdd = CTDD(s.seq)\r\n seq_ctrial = CTriad(s.seq)\r\n seq_gtpc = GTPC(s.seq)\r\n seq_cksaagp = CKSAAGP(s.seq)\r\n seq_gaac = GAAC(s.seq)\r\n seq_gdpc = GDPC(s.seq)\r\n seq_ctdt = CTDT(s.seq)\r\n seq_geary = Geary(s.seq)\r\n seq_cksaap = CKSAAP(s.seq)\r\n seq_aaindex = AAINDEX(s.seq)\r\n seq_paac = PAAC(s.seq)\r\n seqs_dde.append(seq_dde)\r\n seqs_z.append(seq_z)\r\n seqs_aac.append(seq_aac)\r\n seqs_dpc.append(seq_dpc)\r\n seqs_ctdd.append(seq_ctdd)\r\n seqs_blosum62.append(seq_blosum62)\r\n seqs_ctrial.append(seq_ctrial)\r\n seqs_ksctriad.append(seq_ksctriad)\r\n seqs_gtpc.append(seq_gtpc)\r\n seqs_cksaagp.append(seq_cksaagp)\r\n seqs_gaac.append(seq_gaac)\r\n seqs_gdpc.append(seq_gdpc)\r\n seqs_ctdt.append(seq_ctdt)\r\n seqs_geary.append(seq_geary)\r\n seqs_cksaap.append(seq_cksaap)\r\n seqs_aaindex.append(seq_aaindex)\r\n seqs_paac.append(seq_paac)\r\n temp_pad = []\r\n temp_pad1 = []\r\n temps = []\r\n for i in range(20):\r\n temp_pad1.append(0)\r\n for i in range(Max_length - len(s)):\r\n temps.append(temp_pad1)\r\n for i in range(Max_length - len(str(s.seq))):\r\n temp_pad.append(0)\r\n train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad\r\n\r\n seqs_sr.append(train_seq)\r\n #aux_p3 = [new_antifu.wv[a] if a in \"ACDEFGHIKLMNPQRSTVWY\" else [0 for i in range(20)] for a in\r\n #str(s.seq).upper()] + temps\r\n #work2vec.append(aux_p3)\r\n\r\n if s.id[-1] == \"1\":\r\n #print(s.id)\r\n lable_seqs.append([1])\r\n else:\r\n #print(s.id)\r\n lable_seqs.append([0])\r\n\r\n return seqs_blosum62, lable_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac\r\n\r\n# AFC-C based on main dataset\r\n\r\ndef Feature1(f):\r\n amino_acids = \"XACDEFGHIKLMNPQRSTVWY\"\r\n amino_acids_dict = {}\r\n seqs = []\r\n seqs_blosum62 = []\r\n seqs_dde = []\r\n seqs_z = []\r\n seqs_dpc = []\r\n seqs_aac = []\r\n seqs_ctdd = []\r\n lable_seqs = []\r\n work2vec = []\r\n seqs_sr = []\r\n seqs_ksctriad = []\r\n seqs_gtpc = []\r\n seqs_cksaagp = []\r\n seqs_gaac = []\r\n seqs_gdpc = []\r\n seqs_aaindex = []\r\n seqs_ctdt = []\r\n seqs_geary = []\r\n seqs_cksaap = []\r\n seqs_ctrial = []\r\n seqs_paac = []\r\n for n, s in enumerate(amino_acids):\r\n amino_acids_dict[s] = n\r\n #new_antifu = 
Word2Vec.load('D:\\E下载\\Dataset\\Dataset\\\\fa_model_All.bin')\r\n\r\n for n, s in enumerate(SeqIO.parse(f, \"fasta\")):\r\n seq_blosum62 = BLOSUM62(s.seq)\r\n #seq_ksctriad = KSCTriad(s.seq)\r\n seq_dde = DDE(s.seq)\r\n seq_z = ZSCALE(s.seq)\r\n seq_aac = AAC(s.seq)\r\n seq_dpc = DPC(s.seq)\r\n seq_ctdd = CTDD(s.seq)\r\n #seq_ctrial = CTriad(s.seq)\r\n seq_gtpc = GTPC(s.seq)\r\n seq_cksaagp = CKSAAGP(s.seq)\r\n seq_gaac = GAAC(s.seq)\r\n seq_gdpc = GDPC(s.seq)\r\n seq_ctdt = CTDT(s.seq)\r\n seq_geary = Geary(s.seq)\r\n #seq_cksaap = CKSAAP(s.seq)\r\n\r\n seq_aaindex = AAINDEX(s.seq)\r\n #seq_paac = PAAC(s.seq)\r\n\r\n seqs_dde.append(seq_dde)\r\n seqs_z.append(seq_z)\r\n seqs_aac.append(seq_aac)\r\n seqs_dpc.append(seq_dpc)\r\n seqs_ctdd.append(seq_ctdd)\r\n seqs_blosum62.append(seq_blosum62)\r\n #seqs_ctrial.append(seq_ctrial)\r\n #seqs_ksctriad.append(seq_ksctriad)\r\n seqs_gtpc.append(seq_gtpc)\r\n seqs_cksaagp.append(seq_cksaagp)\r\n seqs_gaac.append(seq_gaac)\r\n seqs_gdpc.append(seq_gdpc)\r\n seqs_ctdt.append(seq_ctdt)\r\n seqs_geary.append(seq_geary)\r\n #seqs_cksaap.append(seq_cksaap)\r\n seqs_aaindex.append(seq_aaindex)\r\n #seqs_paac.append(seq_paac)\r\n temp_pad = []\r\n temp_pad1 = []\r\n temps = []\r\n for i in range(20):\r\n temp_pad1.append(0)\r\n for i in range(Max_length - len(s)):\r\n temps.append(temp_pad1)\r\n for i in range(Max_length - len(str(s.seq))):\r\n temp_pad.append(0)\r\n train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad\r\n\r\n seqs_sr.append(train_seq)\r\n #aux_p3 = [new_antifu.wv[a] if a in \"ACDEFGHIKLMNPQRSTVWY\" else [0 for i in range(20)] for a in\r\n #str(s.seq).upper()] + temps\r\n #work2vec.append(aux_p3)\r\n if s.id[-1] == \"1\":\r\n lable_seqs.append([1])\r\n else:\r\n lable_seqs.append([0])\r\n\r\n return seqs_blosum62, lable_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac\r\n\r\n# AFC-C based on alternate dataset\r\n\r\ndef Feature2(f):\r\n amino_acids = \"XACDEFGHIKLMNPQRSTVWY\"\r\n amino_acids_dict = {}\r\n seqs = []\r\n seqs_blosum62 = []\r\n seqs_dde = []\r\n seqs_z = []\r\n seqs_dpc = []\r\n seqs_aac = []\r\n seqs_ctdd = []\r\n lable_seqs = []\r\n work2vec = []\r\n seqs_sr = []\r\n seqs_ksctriad = []\r\n seqs_gtpc = []\r\n seqs_cksaagp = []\r\n seqs_gaac = []\r\n seqs_gdpc = []\r\n seqs_aaindex = []\r\n seqs_ctdt = []\r\n seqs_geary = []\r\n seqs_cksaap = []\r\n seqs_ctrial = []\r\n seqs_paac = []\r\n for n, s in enumerate(amino_acids):\r\n amino_acids_dict[s] = n\r\n #new_antifu = Word2Vec.load('D:\\E下载\\Dataset\\Dataset\\\\fa_model_All.bin')\r\n\r\n for n, s in enumerate(SeqIO.parse(f, \"fasta\")):\r\n seq_blosum62 = BLOSUM62(s.seq)\r\n #seq_ksctriad = KSCTriad(s.seq)\r\n seq_dde = DDE(s.seq)\r\n seq_z = ZSCALE(s.seq)\r\n seq_aac = AAC(s.seq)\r\n seq_dpc = DPC(s.seq)\r\n seq_ctdd = CTDD(s.seq)\r\n seq_ctrial = CTriad(s.seq)\r\n seq_gtpc = GTPC(s.seq)\r\n seq_cksaagp = CKSAAGP(s.seq)\r\n seq_gaac = GAAC(s.seq)\r\n seq_gdpc = GDPC(s.seq)\r\n seq_ctdt = CTDT(s.seq)\r\n seq_geary = Geary(s.seq)\r\n #seq_cksaap = CKSAAP(s.seq)\r\n\r\n seq_aaindex = AAINDEX(s.seq)\r\n #seq_paac = PAAC(s.seq)\r\n\r\n seqs_dde.append(seq_dde)\r\n seqs_z.append(seq_z)\r\n seqs_aac.append(seq_aac)\r\n seqs_dpc.append(seq_dpc)\r\n seqs_ctdd.append(seq_ctdd)\r\n seqs_blosum62.append(seq_blosum62)\r\n seqs_ctrial.append(seq_ctrial)\r\n #seqs_ksctriad.append(seq_ksctriad)\r\n 
seqs_gtpc.append(seq_gtpc)\r\n seqs_cksaagp.append(seq_cksaagp)\r\n seqs_gaac.append(seq_gaac)\r\n seqs_gdpc.append(seq_gdpc)\r\n seqs_ctdt.append(seq_ctdt)\r\n seqs_geary.append(seq_geary)\r\n #seqs_cksaap.append(seq_cksaap)\r\n seqs_aaindex.append(seq_aaindex)\r\n #seqs_paac.append(seq_paac)\r\n temp_pad = []\r\n temp_pad1 = []\r\n temps = []\r\n for i in range(20):\r\n temp_pad1.append(0)\r\n for i in range(Max_length - len(s)):\r\n temps.append(temp_pad1)\r\n for i in range(Max_length - len(str(s.seq))):\r\n temp_pad.append(0)\r\n train_seq = [amino_acids_dict[a.upper()] for a in str(s.seq).upper()] + temp_pad\r\n\r\n seqs_sr.append(train_seq)\r\n #aux_p3 = [new_antifu.wv[a] if a in \"ACDEFGHIKLMNPQRSTVWY\" else [0 for i in range(20)] for a in\r\n #str(s.seq).upper()] + temps\r\n #work2vec.append(aux_p3)\r\n if s.id[-1] == \"1\":\r\n lable_seqs.append([1])\r\n else:\r\n lable_seqs.append([0])\r\n\r\n return seqs_blosum62, lable_seqs, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
7enTropy7/Rummy_RL
|
[
"0b2123672f3979bb9de15d204d6d8cf52271958a"
] |
[
"Rummy PPO/model.py"
] |
[
"import os\nimport numpy as np\nimport torch as T\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions.categorical import Categorical\n\nclass PPOMemory:\n def __init__(self, batch_size):\n self.states = []\n self.probs = []\n self.vals = []\n self.actions = []\n self.rewards = []\n self.dones = []\n\n self.batch_size = batch_size\n\n def generate_batches(self):\n n_states = len(self.states)\n batch_start = np.arange(0, n_states, self.batch_size)\n indices = np.arange(n_states, dtype=np.int64)\n np.random.shuffle(indices)\n batches = [indices[i:i+self.batch_size] for i in batch_start]\n\n return np.array(self.states),\\\n np.array(self.actions),\\\n np.array(self.probs),\\\n np.array(self.vals),\\\n np.array(self.rewards),\\\n np.array(self.dones),\\\n batches\n\n def store_memory(self, state, action, probs, vals, reward, done):\n self.states.append(state)\n self.actions.append(action)\n self.probs.append(probs)\n self.vals.append(vals)\n self.rewards.append(reward)\n self.dones.append(done)\n\n def clear_memory(self):\n self.states = []\n self.probs = []\n self.actions = []\n self.rewards = []\n self.dones = []\n self.vals = []\n\nclass ActorNetwork(nn.Module):\n def __init__(self, n_actions, input_dims, alpha,\n fc1_dims=512, fc2_dims=512, chkpt_dir='ckpt'):\n super(ActorNetwork, self).__init__()\n\n self.checkpoint_file = os.path.join(chkpt_dir, 'actor_torch_ppo')\n self.actor = nn.Sequential(\n nn.Linear(*input_dims, fc1_dims),\n nn.ReLU(),\n nn.Linear(fc1_dims, fc2_dims),\n nn.ReLU(),\n nn.Linear(fc2_dims, n_actions),\n nn.Softmax(dim=-1)\n )\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n self.to(self.device)\n\n def forward(self, state):\n dist = self.actor(state)\n dist = Categorical(dist)\n \n return dist\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.checkpoint_file)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.checkpoint_file))\n\nclass CriticNetwork(nn.Module):\n def __init__(self, input_dims, alpha, fc1_dims=512, fc2_dims=512,\n chkpt_dir='ckpt'):\n super(CriticNetwork, self).__init__()\n\n self.checkpoint_file = os.path.join(chkpt_dir, 'critic_torch_ppo')\n self.critic = nn.Sequential(\n nn.Linear(*input_dims, fc1_dims),\n nn.ReLU(),\n nn.Linear(fc1_dims, fc2_dims),\n nn.ReLU(),\n nn.Linear(fc2_dims, 1)\n )\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n self.to(self.device)\n\n def forward(self, state):\n value = self.critic(state)\n\n return value\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.checkpoint_file)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.checkpoint_file))\n \n\nclass Agent:\n def __init__(self, n_actions, input_dims, gamma=0.99, alpha=0.0003, gae_lambda=0.95,\n policy_clip=0.2, batch_size=64, n_epochs=10):\n self.gamma = gamma\n self.policy_clip = policy_clip\n self.n_epochs = n_epochs\n self.gae_lambda = gae_lambda\n\n self.actor = ActorNetwork(n_actions, input_dims, alpha)\n self.critic = CriticNetwork(input_dims, alpha)\n self.memory = PPOMemory(batch_size)\n \n def remember(self, state, action, probs, vals, reward, done):\n self.memory.store_memory(state, action, probs, vals, reward, done)\n\n def save_models(self):\n print('... saving models ...')\n self.actor.save_checkpoint()\n self.critic.save_checkpoint()\n\n def load_models(self):\n print('... 
loading models ...')\n self.actor.load_checkpoint()\n self.critic.load_checkpoint()\n\n def choose_action(self, observation):\n state = T.tensor(observation, dtype=T.float).to(self.actor.device)\n dist = self.actor(state)\n value = self.critic(state)\n action = dist.sample()\n\n probs = T.squeeze(dist.log_prob(action)).item()\n action = T.squeeze(action).item()\n value = T.squeeze(value).item()\n\n return action, probs, value\n\n def learn(self):\n for _ in range(self.n_epochs):\n state_arr, action_arr, old_prob_arr, vals_arr,\\\n reward_arr, dones_arr, batches = \\\n self.memory.generate_batches()\n values = vals_arr\n advantage = np.zeros(len(reward_arr), dtype=np.float32)\n\n for t in range(len(reward_arr)-1):\n discount = 1\n a_t = 0\n for k in range(t, len(reward_arr)-1):\n a_t += discount*(reward_arr[k] + self.gamma*values[k+1]*\\\n (1-int(dones_arr[k])) - values[k])\n discount *= self.gamma*self.gae_lambda\n advantage[t] = a_t\n advantage = T.tensor(advantage).to(self.actor.device)\n\n values = T.tensor(values).to(self.actor.device)\n for batch in batches:\n states = T.tensor(state_arr[batch], dtype=T.float).to(self.actor.device)\n old_probs = T.tensor(old_prob_arr[batch]).to(self.actor.device)\n actions = T.tensor(action_arr[batch]).to(self.actor.device)\n\n dist = self.actor(states)\n critic_value = self.critic(states)\n\n critic_value = T.squeeze(critic_value)\n\n new_probs = dist.log_prob(actions)\n prob_ratio = new_probs.exp() / old_probs.exp()\n #prob_ratio = (new_probs - old_probs).exp()\n weighted_probs = advantage[batch] * prob_ratio\n weighted_clipped_probs = T.clamp(prob_ratio, 1-self.policy_clip,\n 1+self.policy_clip)*advantage[batch]\n actor_loss = -T.min(weighted_probs, weighted_clipped_probs).mean()\n\n returns = advantage[batch] + values[batch]\n critic_loss = (returns-critic_value)**2\n critic_loss = critic_loss.mean()\n\n total_loss = actor_loss + 0.5*critic_loss\n self.actor.optimizer.zero_grad()\n self.critic.optimizer.zero_grad()\n total_loss.backward()\n self.actor.optimizer.step()\n self.critic.optimizer.step()\n\n self.memory.clear_memory() \n"
] |
[
[
"torch.distributions.categorical.Categorical",
"torch.nn.Softmax",
"torch.clamp",
"torch.load",
"numpy.arange",
"torch.min",
"numpy.random.shuffle",
"torch.tensor",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.ReLU",
"numpy.array",
"torch.squeeze"
]
] |
Willsparker/UniProjects
|
[
"5fffc656e78b45db0506e42f4ba5c4211d1b9399"
] |
[
"Visual_Intelligence/Kinect_Object_Recognition/VI-2020-21-Python_Package/confusionMatrix.py"
] |
[
"import matplotlib.pyplot as plt\nimport seaborn as sn\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\ntruth = [\"Camera\",\"Dog\",\"Android\",\"Baby\",\"Keyboard\",\"Dinosaur\",\"Dragon\",\"Bunny\",\"Blackberry\",\"Diet Coke Bottle\",\"Coffee Tin\",\"Car\",\"Mug\",\"Koala\",\"Mug\"] \nlabels = [\"Camera\",\"Dog\",\"Android\",\"Baby\",\"Keyboard\",\"Dinosaur\",\"Dragon\",\"Blackberry\",\"Diet Coke Bottle\",\"Coffee Tin\",\"Car\",\"Mug\",\"Koala\",\"Duck\",\"Bunny\"]\npred = [\"Camera\",\"Dog\",\"Android\",\"Baby\",\"Camera\",\"Camera\",\"Koala\",\"Koala\",\"Camera\",\"Camera\",\"Android\",\"Camera\",\"Android\",\"Koala\",\"Baby\"]\n\nplt.figure(1)\nx = confusion_matrix(truth,pred,labels=labels)\ndf_cm = pd.DataFrame(x, range(15),range(15)); \nsn.heatmap(df_cm, annot=True)\nplt.show()"
] |
[
[
"matplotlib.pyplot.show",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.figure"
]
] |
Dongwon-Shin/2018-2nd-Algorithm
|
[
"b063f678bd723b82fab5797af479dee5f0f641ed"
] |
[
"Python/nparray.py"
] |
[
"import numpy as np\nstocks = np.array([140.49, 0.97, 40.68, 41.53, 55.7,57.21, 98.2, 99.19, 109.96, 111.47, 35.71, 36.27, 87.85, 89.11, 30.22, 30.91]) \nstocks\n\nstocks = stocks.reshape(8, 2).T\nstocks\n\nsap = np.array([\"MMM\", \"ABT\", \"ABBV\", \"ACN\", \"ACE\", \"ATVI\", \"ADBE\", \"ADT\"])\nsap"
] |
[
[
"numpy.array"
]
] |
aayushidwivedi01/spark-tk-old
|
[
"fcf25f86498ac416cce77de0db4cf0aa503d20ac"
] |
[
"regression-tests/sparktkregtests/testcases/dicom/inspect_dicom_test.py"
] |
[
"# vim: set encoding=utf-8\n\n# Copyright (c) 2016 Intel Corporation \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"tests dicom.inspect() functionality\"\"\"\n\nimport unittest\nfrom sparktkregtests.lib import sparktk_test\nimport os\nimport dicom\nimport numpy\n\n\nclass InspectDicomTest(sparktk_test.SparkTKTestCase):\n\n def setUp(self):\n \"\"\"import dicom data for testing\"\"\"\n super(InspectDicomTest, self).setUp()\n self.dataset = self.get_file(\"dicom_uncompressed\")\n self.dicom = self.context.dicom.import_dcm(self.dataset)\n self.xml_directory = self.get_local_dataset(\"dicom_xml/\")\n self.image_directory = self.get_local_dataset(\"dicom_uncompressed/\")\n\n def test_metadata_imagedata_row_count_same(self):\n \"\"\"test that the row count are the same for inspect pixeldate/metadata\"\"\"\n metadata_result = self.dicom.metadata.inspect(self.dicom.metadata.count())\n image_result = self.dicom.pixeldata.inspect(self.dicom.pixeldata.count())\n self.assertEqual(len(metadata_result.rows), len(image_result.rows))\n\n def test_metadata_content(self):\n \"\"\"tests metadata inspect content\"\"\"\n # first we will get the files we created the dicom from\n files = []\n for filename in sorted([f for f in os.listdir(self.xml_directory)]):\n with open(self.xml_directory + str(filename)) as xmlfile:\n contents = xmlfile.read()\n files.append(contents)\n\n inspect = self.dicom.metadata.inspect()\n\n # we ensure the metadata in dicom matches the generated\n # xmls from the files we created the dicom from\n for (inspect_file, xml_file) in zip(inspect.rows, files):\n # we need to remove the bulkdata tag before we compare since\n # it records the location where the files were loaded from\n # and therefore will differ between the content\n inspect_file = inspect_file[1].encode(\"ascii\", \"ignore\")\n bulk_data_index = xml_file.index(\"<BulkData\")\n xml_bulk_data = xml_file[bulk_data_index:bulk_data_index + xml_file[bulk_data_index:].index(\">\") + 1]\n inspect_bulk_data = inspect_file[bulk_data_index:bulk_data_index + inspect_file[bulk_data_index:].index(\">\") + 1]\n\n xml_file = xml_file.replace(xml_bulk_data, \"\")\n inspect_file = inspect_file.replace(inspect_bulk_data, \"\")\n\n self.assertEqual(xml_file, inspect_file)\n\n def test_image_content_inspect_dcm_basic(self):\n \"\"\"content test of image data for dicom\"\"\"\n # load the files so we can compare with the dicom result\n files = []\n for filename in sorted([f for f in os.listdir(self.image_directory)]):\n pixel_data = dicom.read_file(self.image_directory + filename).pixel_array\n files.append(pixel_data)\n\n # iterate through the data in the files and in the dicom frame\n # and ensure that they match\n image_inspect = self.dicom.pixeldata.inspect()\n for (dcm_image, pixel_image) in zip(image_inspect.rows, files):\n numpy.testing.assert_equal(pixel_image, dcm_image[1])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_equal"
]
] |
hadim/torchani
|
[
"29606ef95c046488752706c3caaddc66ce846dc2"
] |
[
"torchani/aev.py"
] |
[
"import torch\nfrom torch import Tensor\nimport math\nfrom typing import Tuple, Optional, NamedTuple\nimport sys\n\nif sys.version_info[:2] < (3, 7):\n class FakeFinal:\n def __getitem__(self, x):\n return x\n Final = FakeFinal()\nelse:\n from torch.jit import Final\n\n\nclass SpeciesAEV(NamedTuple):\n species: Tensor\n aevs: Tensor\n\n\ndef cutoff_cosine(distances: Tensor, cutoff: float) -> Tensor:\n # assuming all elements in distances are smaller than cutoff\n return 0.5 * torch.cos(distances * (math.pi / cutoff)) + 0.5\n\n\ndef radial_terms(Rcr: float, EtaR: Tensor, ShfR: Tensor, distances: Tensor) -> Tensor:\n \"\"\"Compute the radial subAEV terms of the center atom given neighbors\n\n This correspond to equation (3) in the `ANI paper`_. This function just\n compute the terms. The sum in the equation is not computed.\n The input tensor have shape (conformations, atoms, N), where ``N``\n is the number of neighbor atoms within the cutoff radius and output\n tensor should have shape\n (conformations, atoms, ``self.radial_sublength()``)\n\n .. _ANI paper:\n http://pubs.rsc.org/en/Content/ArticleLanding/2017/SC/C6SC05720A#!divAbstract\n \"\"\"\n distances = distances.unsqueeze(-1).unsqueeze(-1)\n fc = cutoff_cosine(distances, Rcr)\n # Note that in the equation in the paper there is no 0.25\n # coefficient, but in NeuroChem there is such a coefficient.\n # We choose to be consistent with NeuroChem instead of the paper here.\n ret = 0.25 * torch.exp(-EtaR * (distances - ShfR)**2) * fc\n # At this point, ret now have shape\n # (conformations, atoms, N, ?, ?) where ? depend on constants.\n # We then should flat the last 2 dimensions to view the subAEV as one\n # dimension vector\n return ret.flatten(start_dim=-2)\n\n\ndef angular_terms(Rca: float, ShfZ: Tensor, EtaA: Tensor, Zeta: Tensor,\n ShfA: Tensor, vectors12: Tensor) -> Tensor:\n \"\"\"Compute the angular subAEV terms of the center atom given neighbor pairs.\n\n This correspond to equation (4) in the `ANI paper`_. This function just\n compute the terms. The sum in the equation is not computed.\n The input tensor have shape (conformations, atoms, N), where N\n is the number of neighbor atom pairs within the cutoff radius and\n output tensor should have shape\n (conformations, atoms, ``self.angular_sublength()``)\n\n .. _ANI paper:\n http://pubs.rsc.org/en/Content/ArticleLanding/2017/SC/C6SC05720A#!divAbstract\n \"\"\"\n vectors12 = vectors12.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n distances12 = vectors12.norm(2, dim=-5)\n\n cos_angles = vectors12.prod(0).sum(1) / distances12.prod(0)\n # 0.95 is multiplied to the cos values to prevent acos from returning NaN.\n angles = torch.acos(0.95 * cos_angles)\n\n fcj12 = cutoff_cosine(distances12, Rca)\n factor1 = ((1 + torch.cos(angles - ShfZ)) / 2) ** Zeta\n factor2 = torch.exp(-EtaA * (distances12.sum(0) / 2 - ShfA) ** 2)\n ret = 2 * factor1 * factor2 * fcj12.prod(0)\n # At this point, ret now have shape\n # (conformations, atoms, N, ?, ?, ?, ?) where ? 
depend on constants.\n # We then should flat the last 4 dimensions to view the subAEV as one\n # dimension vector\n return ret.flatten(start_dim=-4)\n\n\ndef compute_shifts(cell: Tensor, pbc: Tensor, cutoff: float) -> Tensor:\n \"\"\"Compute the shifts of unit cell along the given cell vectors to make it\n large enough to contain all pairs of neighbor atoms with PBC under\n consideration\n\n Arguments:\n cell (:class:`torch.Tensor`): tensor of shape (3, 3) of the three\n vectors defining unit cell:\n tensor([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])\n cutoff (float): the cutoff inside which atoms are considered pairs\n pbc (:class:`torch.Tensor`): boolean vector of size 3 storing\n if pbc is enabled for that direction.\n\n Returns:\n :class:`torch.Tensor`: long tensor of shifts. the center cell and\n symmetric cells are not included.\n \"\"\"\n reciprocal_cell = cell.inverse().t()\n inv_distances = reciprocal_cell.norm(2, -1)\n num_repeats = torch.ceil(cutoff * inv_distances).to(torch.long)\n num_repeats = torch.where(pbc, num_repeats, num_repeats.new_zeros(()))\n r1 = torch.arange(1, num_repeats[0] + 1, device=cell.device)\n r2 = torch.arange(1, num_repeats[1] + 1, device=cell.device)\n r3 = torch.arange(1, num_repeats[2] + 1, device=cell.device)\n o = torch.zeros(1, dtype=torch.long, device=cell.device)\n return torch.cat([\n torch.cartesian_prod(r1, r2, r3),\n torch.cartesian_prod(r1, r2, o),\n torch.cartesian_prod(r1, r2, -r3),\n torch.cartesian_prod(r1, o, r3),\n torch.cartesian_prod(r1, o, o),\n torch.cartesian_prod(r1, o, -r3),\n torch.cartesian_prod(r1, -r2, r3),\n torch.cartesian_prod(r1, -r2, o),\n torch.cartesian_prod(r1, -r2, -r3),\n torch.cartesian_prod(o, r2, r3),\n torch.cartesian_prod(o, r2, o),\n torch.cartesian_prod(o, r2, -r3),\n torch.cartesian_prod(o, o, r3),\n ])\n\n\ndef neighbor_pairs(padding_mask: Tensor, coordinates: Tensor, cell: Tensor,\n shifts: Tensor, cutoff: float) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute pairs of atoms that are neighbors\n\n Arguments:\n padding_mask (:class:`torch.Tensor`): boolean tensor of shape\n (molecules, atoms) for padding mask. 
1 == is padding.\n coordinates (:class:`torch.Tensor`): tensor of shape\n (molecules, atoms, 3) for atom coordinates.\n cell (:class:`torch.Tensor`): tensor of shape (3, 3) of the three vectors\n defining unit cell: tensor([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])\n cutoff (float): the cutoff inside which atoms are considered pairs\n shifts (:class:`torch.Tensor`): tensor of shape (?, 3) storing shifts\n \"\"\"\n coordinates = coordinates.detach().masked_fill(padding_mask.unsqueeze(-1), math.nan)\n cell = cell.detach()\n num_atoms = padding_mask.shape[1]\n num_mols = padding_mask.shape[0]\n all_atoms = torch.arange(num_atoms, device=cell.device)\n\n # Step 2: center cell\n # torch.triu_indices is faster than combinations\n p12_center = torch.triu_indices(num_atoms, num_atoms, 1, device=cell.device)\n shifts_center = shifts.new_zeros((p12_center.shape[1], 3))\n\n # Step 3: cells with shifts\n # shape convention (shift index, molecule index, atom index, 3)\n num_shifts = shifts.shape[0]\n all_shifts = torch.arange(num_shifts, device=cell.device)\n prod = torch.cartesian_prod(all_shifts, all_atoms, all_atoms).t()\n shift_index = prod[0]\n p12 = prod[1:]\n shifts_outside = shifts.index_select(0, shift_index)\n\n # Step 4: combine results for all cells\n shifts_all = torch.cat([shifts_center, shifts_outside])\n p12_all = torch.cat([p12_center, p12], dim=1)\n shift_values = shifts_all.to(cell.dtype) @ cell\n\n # step 5, compute distances, and find all pairs within cutoff\n selected_coordinates = coordinates.index_select(1, p12_all.view(-1)).view(num_mols, 2, -1, 3)\n distances = (selected_coordinates[:, 0, ...] - selected_coordinates[:, 1, ...] + shift_values).norm(2, -1)\n in_cutoff = (distances <= cutoff).nonzero()\n molecule_index, pair_index = in_cutoff.unbind(1)\n molecule_index *= num_atoms\n atom_index12 = p12_all[:, pair_index]\n shifts = shifts_all.index_select(0, pair_index)\n return molecule_index + atom_index12, shifts\n\n\ndef neighbor_pairs_nopbc(padding_mask: Tensor, coordinates: Tensor, cutoff: float) -> Tensor:\n \"\"\"Compute pairs of atoms that are neighbors (doesn't use PBC)\n\n This function bypasses the calculation of shifts and duplication\n of atoms in order to make calculations faster\n\n Arguments:\n padding_mask (:class:`torch.Tensor`): boolean tensor of shape\n (molecules, atoms) for padding mask. 1 == is padding.\n coordinates (:class:`torch.Tensor`): tensor of shape\n (molecules, atoms, 3) for atom coordinates.\n cutoff (float): the cutoff inside which atoms are considered pairs\n \"\"\"\n coordinates = coordinates.detach().masked_fill(padding_mask.unsqueeze(-1), math.nan)\n current_device = coordinates.device\n num_atoms = padding_mask.shape[1]\n num_mols = padding_mask.shape[0]\n p12_all = torch.triu_indices(num_atoms, num_atoms, 1, device=current_device)\n p12_all_flattened = p12_all.view(-1)\n\n pair_coordinates = coordinates.index_select(1, p12_all_flattened).view(num_mols, 2, -1, 3)\n distances = (pair_coordinates[:, 0, ...] 
- pair_coordinates[:, 1, ...]).norm(2, -1)\n in_cutoff = (distances <= cutoff).nonzero()\n molecule_index, pair_index = in_cutoff.unbind(1)\n molecule_index *= num_atoms\n atom_index12 = p12_all[:, pair_index] + molecule_index\n return atom_index12\n\n\ndef triu_index(num_species: int) -> Tensor:\n species1, species2 = torch.triu_indices(num_species, num_species).unbind(0)\n pair_index = torch.arange(species1.shape[0], dtype=torch.long)\n ret = torch.zeros(num_species, num_species, dtype=torch.long)\n ret[species1, species2] = pair_index\n ret[species2, species1] = pair_index\n return ret\n\n\ndef cumsum_from_zero(input_: Tensor) -> Tensor:\n cumsum = torch.zeros_like(input_)\n torch.cumsum(input_[:-1], dim=0, out=cumsum[1:])\n return cumsum\n\n\ndef triple_by_molecule(atom_index12: Tensor) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Input: indices for pairs of atoms that are close to each other.\n each pair only appear once, i.e. only one of the pairs (1, 2) and\n (2, 1) exists.\n\n Output: indices for all central atoms and it pairs of neighbors. For\n example, if input has pair (0, 1), (0, 2), (0, 3), (0, 4), (1, 2),\n (1, 3), (1, 4), (2, 3), (2, 4), (3, 4), then the output would have\n central atom 0, 1, 2, 3, 4 and for cental atom 0, its pairs of neighbors\n are (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)\n \"\"\"\n # convert representation from pair to central-others\n ai1 = atom_index12.view(-1)\n sorted_ai1, rev_indices = ai1.sort()\n\n # sort and compute unique key\n uniqued_central_atom_index, counts = torch.unique_consecutive(sorted_ai1, return_inverse=False, return_counts=True)\n\n # compute central_atom_index\n pair_sizes = counts * (counts - 1) // 2\n pair_indices = torch.repeat_interleave(pair_sizes)\n central_atom_index = uniqued_central_atom_index.index_select(0, pair_indices)\n\n # do local combinations within unique key, assuming sorted\n m = counts.max().item() if counts.numel() > 0 else 0\n n = pair_sizes.shape[0]\n intra_pair_indices = torch.tril_indices(m, m, -1, device=ai1.device).unsqueeze(1).expand(-1, n, -1)\n mask = (torch.arange(intra_pair_indices.shape[2], device=ai1.device) < pair_sizes.unsqueeze(1)).flatten()\n sorted_local_index12 = intra_pair_indices.flatten(1, 2)[:, mask]\n sorted_local_index12 += cumsum_from_zero(counts).index_select(0, pair_indices)\n\n # unsort result from last part\n local_index12 = rev_indices[sorted_local_index12]\n\n # compute mapping between representation of central-other to pair\n n = atom_index12.shape[1]\n sign12 = ((local_index12 < n).to(torch.int8) * 2) - 1\n return central_atom_index, local_index12 % n, sign12\n\n\ndef compute_aev(species: Tensor, coordinates: Tensor, triu_index: Tensor,\n constants: Tuple[float, Tensor, Tensor, float, Tensor, Tensor, Tensor, Tensor],\n sizes: Tuple[int, int, int, int, int], cell_shifts: Optional[Tuple[Tensor, Tensor]]) -> Tensor:\n Rcr, EtaR, ShfR, Rca, ShfZ, EtaA, Zeta, ShfA = constants\n num_species, radial_sublength, radial_length, angular_sublength, angular_length = sizes\n num_molecules = species.shape[0]\n num_atoms = species.shape[1]\n num_species_pairs = angular_length // angular_sublength\n coordinates_ = coordinates\n coordinates = coordinates_.flatten(0, 1)\n\n # PBC calculation is bypassed if there are no shifts\n if cell_shifts is None:\n atom_index12 = neighbor_pairs_nopbc(species == -1, coordinates_, Rcr)\n selected_coordinates = coordinates.index_select(0, atom_index12.view(-1)).view(2, -1, 3)\n vec = selected_coordinates[0] - selected_coordinates[1]\n else:\n cell, shifts 
= cell_shifts\n atom_index12, shifts = neighbor_pairs(species == -1, coordinates_, cell, shifts, Rcr)\n shift_values = shifts.to(cell.dtype) @ cell\n selected_coordinates = coordinates.index_select(0, atom_index12.view(-1)).view(2, -1, 3)\n vec = selected_coordinates[0] - selected_coordinates[1] + shift_values\n\n species = species.flatten()\n species12 = species[atom_index12]\n\n distances = vec.norm(2, -1)\n\n # compute radial aev\n radial_terms_ = radial_terms(Rcr, EtaR, ShfR, distances)\n radial_aev = radial_terms_.new_zeros((num_molecules * num_atoms * num_species, radial_sublength))\n index12 = atom_index12 * num_species + species12.flip(0)\n radial_aev.index_add_(0, index12[0], radial_terms_)\n radial_aev.index_add_(0, index12[1], radial_terms_)\n radial_aev = radial_aev.reshape(num_molecules, num_atoms, radial_length)\n\n # Rca is usually much smaller than Rcr, using neighbor list with cutoff=Rcr is a waste of resources\n # Now we will get a smaller neighbor list that only cares about atoms with distances <= Rca\n even_closer_indices = (distances <= Rca).nonzero().flatten()\n atom_index12 = atom_index12.index_select(1, even_closer_indices)\n species12 = species12.index_select(1, even_closer_indices)\n vec = vec.index_select(0, even_closer_indices)\n\n # compute angular aev\n central_atom_index, pair_index12, sign12 = triple_by_molecule(atom_index12)\n species12_small = species12[:, pair_index12]\n vec12 = vec.index_select(0, pair_index12.view(-1)).view(2, -1, 3) * sign12.unsqueeze(-1)\n species12_ = torch.where(sign12 == 1, species12_small[1], species12_small[0])\n angular_terms_ = angular_terms(Rca, ShfZ, EtaA, Zeta, ShfA, vec12)\n angular_aev = angular_terms_.new_zeros((num_molecules * num_atoms * num_species_pairs, angular_sublength))\n index = central_atom_index * num_species_pairs + triu_index[species12_[0], species12_[1]]\n angular_aev.index_add_(0, index, angular_terms_)\n angular_aev = angular_aev.reshape(num_molecules, num_atoms, angular_length)\n return torch.cat([radial_aev, angular_aev], dim=-1)\n\n\nclass AEVComputer(torch.nn.Module):\n r\"\"\"The AEV computer that takes coordinates as input and outputs aevs.\n\n Arguments:\n Rcr (float): :math:`R_C` in equation (2) when used at equation (3)\n in the `ANI paper`_.\n Rca (float): :math:`R_C` in equation (2) when used at equation (4)\n in the `ANI paper`_.\n EtaR (:class:`torch.Tensor`): The 1D tensor of :math:`\\eta` in\n equation (3) in the `ANI paper`_.\n ShfR (:class:`torch.Tensor`): The 1D tensor of :math:`R_s` in\n equation (3) in the `ANI paper`_.\n EtaA (:class:`torch.Tensor`): The 1D tensor of :math:`\\eta` in\n equation (4) in the `ANI paper`_.\n Zeta (:class:`torch.Tensor`): The 1D tensor of :math:`\\zeta` in\n equation (4) in the `ANI paper`_.\n ShfA (:class:`torch.Tensor`): The 1D tensor of :math:`R_s` in\n equation (4) in the `ANI paper`_.\n ShfZ (:class:`torch.Tensor`): The 1D tensor of :math:`\\theta_s` in\n equation (4) in the `ANI paper`_.\n num_species (int): Number of supported atom types.\n\n .. 
_ANI paper:\n http://pubs.rsc.org/en/Content/ArticleLanding/2017/SC/C6SC05720A#!divAbstract\n \"\"\"\n Rcr: Final[float]\n Rca: Final[float]\n num_species: Final[int]\n\n radial_sublength: Final[int]\n radial_length: Final[int]\n angular_sublength: Final[int]\n angular_length: Final[int]\n aev_length: Final[int]\n sizes: Final[Tuple[int, int, int, int, int]]\n\n def __init__(self, Rcr, Rca, EtaR, ShfR, EtaA, Zeta, ShfA, ShfZ, num_species):\n super().__init__()\n self.Rcr = Rcr\n self.Rca = Rca\n assert Rca <= Rcr, \"Current implementation of AEVComputer assumes Rca <= Rcr\"\n self.num_species = num_species\n\n # convert constant tensors to a ready-to-broadcast shape\n # shape convension (..., EtaR, ShfR)\n self.register_buffer('EtaR', EtaR.view(-1, 1))\n self.register_buffer('ShfR', ShfR.view(1, -1))\n # shape convension (..., EtaA, Zeta, ShfA, ShfZ)\n self.register_buffer('EtaA', EtaA.view(-1, 1, 1, 1))\n self.register_buffer('Zeta', Zeta.view(1, -1, 1, 1))\n self.register_buffer('ShfA', ShfA.view(1, 1, -1, 1))\n self.register_buffer('ShfZ', ShfZ.view(1, 1, 1, -1))\n\n # The length of radial subaev of a single species\n self.radial_sublength = self.EtaR.numel() * self.ShfR.numel()\n # The length of full radial aev\n self.radial_length = self.num_species * self.radial_sublength\n # The length of angular subaev of a single species\n self.angular_sublength = self.EtaA.numel() * self.Zeta.numel() * self.ShfA.numel() * self.ShfZ.numel()\n # The length of full angular aev\n self.angular_length = (self.num_species * (self.num_species + 1)) // 2 * self.angular_sublength\n # The length of full aev\n self.aev_length = self.radial_length + self.angular_length\n self.sizes = self.num_species, self.radial_sublength, self.radial_length, self.angular_sublength, self.angular_length\n\n self.register_buffer('triu_index', triu_index(num_species).to(device=self.EtaR.device))\n\n # Set up default cell and compute default shifts.\n # These values are used when cell and pbc switch are not given.\n cutoff = max(self.Rcr, self.Rca)\n default_cell = torch.eye(3, dtype=self.EtaR.dtype, device=self.EtaR.device)\n default_pbc = torch.zeros(3, dtype=torch.bool, device=self.EtaR.device)\n default_shifts = compute_shifts(default_cell, default_pbc, cutoff)\n self.register_buffer('default_cell', default_cell)\n self.register_buffer('default_shifts', default_shifts)\n\n def constants(self):\n return self.Rcr, self.EtaR, self.ShfR, self.Rca, self.ShfZ, self.EtaA, self.Zeta, self.ShfA\n\n def forward(self, input_: Tuple[Tensor, Tensor],\n cell: Optional[Tensor] = None,\n pbc: Optional[Tensor] = None) -> SpeciesAEV:\n \"\"\"Compute AEVs\n\n Arguments:\n input_ (tuple): Can be one of the following two cases:\n\n If you don't care about periodic boundary conditions at all,\n then input can be a tuple of two tensors: species, coordinates.\n species must have shape ``(N, A)``, coordinates must have shape\n ``(N, A, 3)`` where ``N`` is the number of molecules in a batch,\n and ``A`` is the number of atoms.\n\n .. warning::\n\n The species must be indexed in 0, 1, 2, 3, ..., not the element\n index in periodic table. Check :class:`torchani.SpeciesConverter`\n if you want periodic table indexing.\n\n .. 
note:: The coordinates, and cell are in Angstrom.\n\n If you want to apply periodic boundary conditions, then the input\n would be a tuple of two tensors (species, coordinates) and two keyword\n arguments `cell=...` , and `pbc=...` where species and coordinates are\n the same as described above, cell is a tensor of shape (3, 3) of the\n three vectors defining unit cell:\n\n .. code-block:: python\n\n tensor([[x1, y1, z1],\n [x2, y2, z2],\n [x3, y3, z3]])\n\n and pbc is boolean vector of size 3 storing if pbc is enabled\n for that direction.\n\n Returns:\n NamedTuple: Species and AEVs. species are the species from the input\n unchanged, and AEVs is a tensor of shape ``(N, A, self.aev_length())``\n \"\"\"\n species, coordinates = input_\n assert species.shape == coordinates.shape[:-1]\n\n if cell is None and pbc is None:\n aev = compute_aev(species, coordinates, self.triu_index, self.constants(), self.sizes, None)\n else:\n assert (cell is not None and pbc is not None)\n cutoff = max(self.Rcr, self.Rca)\n shifts = compute_shifts(cell, pbc, cutoff)\n aev = compute_aev(species, coordinates, self.triu_index, self.constants(), self.sizes, (cell, shifts))\n\n return SpeciesAEV(species, aev)\n"
] |
[
[
"torch.ceil",
"torch.zeros",
"torch.cat",
"torch.zeros_like",
"torch.eye",
"torch.exp",
"torch.repeat_interleave",
"torch.acos",
"torch.tril_indices",
"torch.unique_consecutive",
"torch.triu_indices",
"torch.arange",
"torch.where",
"torch.cartesian_prod",
"torch.cumsum",
"torch.cos"
]
] |
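Below is a minimal usage sketch for the AEVComputer module listed above, assuming it is importable as torchani.aev (as in the upstream TorchANI package); the hyperparameter values and the 2-species toy input are illustrative placeholders, not the published ANI constants.

import torch
from torchani.aev import AEVComputer  # assumed import path for the module above

# Illustrative (not ANI-trained) hyperparameters for a small 2-species setup.
aev_computer = AEVComputer(
    Rcr=5.2, Rca=3.5,
    EtaR=torch.tensor([16.0]), ShfR=torch.tensor([0.9, 1.7, 2.5]),
    EtaA=torch.tensor([8.0]), Zeta=torch.tensor([32.0]),
    ShfA=torch.tensor([0.9, 1.6]), ShfZ=torch.tensor([0.2, 1.0, 1.9]),
    num_species=2,
)

# One molecule with 3 atoms; species are 0-based indices, -1 marks padding.
species = torch.tensor([[0, 1, 1]])
coordinates = torch.randn(1, 3, 3)

# forward() takes a (species, coordinates) tuple and returns a SpeciesAEV namedtuple.
species_out, aev = aev_computer((species, coordinates))
print(aev.shape)  # (1, 3, aev_computer.aev_length)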
Lowe-Institute/data-automation
|
[
"10d8ec7729eff6e7702a367ff5bae2a871069799"
] |
[
"lowe/acs/ACSClient.py"
] |
[
"import asyncio\nimport aiohttp\nimport backoff\nimport json\nimport os\nimport pandas as pd\nimport requests\n\nfrom dotenv import load_dotenv, find_dotenv\nfrom lowe.locations.lookup import name2fips, fips2name\nfrom typing import Union, List, Dict\n\ntry:\n import importlib.resources as pkg_resources\nexcept ImportError:\n import importlib_resources as pkg_resources\n\nfrom . import tableids\n\n\nclass ACSClient(object):\n def __init__(self, key_env_name: str = \"API_KEY_ACS\"):\n \"\"\"the ACS Client class provides methods for wrapping around the ACS client\n\n Parameters\n ----------\n key_env_name : str, optional\n name of the environment variable in your .env\n file corresponding to your ACS API key, by default \"API_KEY_ACS\"\n \"\"\"\n load_dotenv(find_dotenv())\n self.API_KEY = os.environ.get(key_env_name, None)\n try:\n assert self.API_KEY is not None\n except AssertionError:\n print(\n f\"Error: make sure you have your ACS API key loaded as an environment variable under the name {key_env_name}.\"\n )\n\n self.surveys = {\"1\": \"acs1\", \"3\": \"acs3\", \"5\": \"acs5\"}\n\n self.tabletypes = {\n \"detail\": \"\", # Default table type\n \"subject\": \"/subject\",\n \"profile\": \"/profile\",\n \"data profile\": \"/profile\",\n \"dprofile\": \"/profile\",\n \"comparison profile\": \"/cprofile\",\n \"comp profile\": \"/cprofile\",\n \"cprofile\": \"/cprofile\",\n }\n\n async def initialize(self):\n self.session = aiohttp.ClientSession()\n\n async def close(self):\n if not self.session.closed:\n await self.session.close()\n\n def _base_uri(\n self,\n year: Union[int, str],\n tabletype: str = \"detail\",\n estimate: Union[int, str] = \"5\",\n ):\n \"\"\"_base_uri generates the base URI for the ACS API for each type of table and the 1, 3, and 5 year estimate tables\n\n Parameters\n ----------\n year : Union[int, str]\n Year we want to pull the data for\n tabletype : str, optional\n Type of table we want to pull, by default \"detail\"\n Options are:\n - \"detail\" <--> ACS Detail tables,\n - \"subject\" <--> Subject Tables,\n - [\"profile\", \"data profile\", or \"dprofile\"] <--> Data Profile Tables,\n - [\"comparison profile\", \"comp profile\", \"cprofile\"] for ACS comparison profiles\n estimate : Union[int,str], optional\n [description], by default \"5\"\n\n NOTE: 1 year estimate URLs will almost definitely not work, but 3- and 5-year estimates will\n\n Returns\n -------\n str\n Base URL for querying ACS API\n \"\"\"\n survey = self.surveys[str(estimate)]\n try:\n table = self.tabletypes[tabletype.lower()]\n except KeyError:\n print(\"ERROR: Please provide valid table type\")\n\n base = f\"https://api.census.gov/data/{str(year)}/acs/{survey}\"\n\n return base + table\n\n def _get_var_defs(\n self,\n fname: str,\n year: Union[int, str] = \"2019\",\n tabletype: str = \"detail\",\n estimate: Union[int, str] = \"5\",\n ):\n \"\"\"Checks to see if the variable decoding JSON is included in the tableids/ folder, and if not, downloads it\"\"\"\n # First, get the base url\n base = self._base_uri(\n year=str(year), tabletype=tabletype, estimate=str(estimate)\n )\n req_uri = base + \"/variables.json\"\n vars = requests.get(req_uri)\n js = vars.json()\n\n with open(fname, \"w\", encoding=\"utf-8\") as f:\n json.dump(js, f, ensure_ascii=False, indent=4)\n\n return js\n\n def _infer_table_type(self, tableid: str):\n tableid = tableid.lower()\n if tableid[0] == \"b\":\n return \"detail\"\n elif tableid[0] == \"s\":\n return \"subject\"\n elif tableid[0:2] == \"dp\":\n return \"dprofile\"\n elif 
tableid[0:2] == \"cp\":\n return \"cprofile\"\n return None\n\n def _infer_varfile(self, year: Union[int, str], tabletype: str):\n if tabletype.lower() == \"detail\":\n return f\"detail_vars_{str(year)}.json\"\n elif tabletype.lower() == \"subject\":\n return f\"subject_vars_{str(year)}.json\"\n elif tabletype.lower() == \"dprofile\":\n return f\"dprofile_vars_{str(year)}.json\"\n\n @backoff.on_exception(\n backoff.expo, (aiohttp.ClientError, aiohttp.ClientResponseError), max_tries=5\n )\n async def _collect_table(\n self,\n tableid: str,\n year: Union[int, str],\n location: Dict[str, str],\n tabletype: str = \"detail\",\n estimate: Union[int, str] = \"5\",\n debug: bool = False,\n ):\n # Check to see if the client session exists\n try:\n assert self.session is not None\n except AssertionError:\n print(\n \"Error: Please initialize client \\\n session with `client.initialize()`\"\n )\n\n base = self._base_uri(year=year, tabletype=tabletype, estimate=estimate)\n\n key_translations = {\"msa\": \"geocomp\", \"city\": \"place\", \"county\": \"county\"}\n\n # The 'for' part is a little more tricky. We need to append\n # MSA, county, and city in that order, with %20 in between\n place = \"\"\n for k, v in location.items():\n if v is not None and k.lower() != \"state\":\n place += (\n f\"%20{key_translations[k]}:{v}\"\n if len(place) > 0\n else f\"{key_translations[k]}:{v}\"\n )\n\n keyz = list(location.keys())\n\n if len(keyz) == 1 and \"state\" in keyz:\n params = {\n \"get\": f\"group({tableid})\",\n \"for\": f\"state:{location['state']}\",\n \"key\": self.API_KEY,\n }\n elif len(keyz) > 1:\n params = {\n \"get\": f\"group({tableid})\",\n \"for\": place,\n \"in\": f\"state:{location['state']}\",\n \"key\": self.API_KEY,\n }\n else:\n params = {\n \"get\": f\"group({tableid})\",\n \"for\": \"us:1\",\n \"key\": self.API_KEY,\n }\n\n if tabletype == \"detail\" or tabletype == \"\":\n params[\"get\"] = tableid + \",\"\n\n async with self.session.get(base, params=params, raise_for_status=True) as resp:\n if debug:\n print(resp.url)\n print(resp.status)\n return await resp.json()\n\n async def _process_request(\n self,\n tableid: str,\n year: Union[int, str],\n location: Dict[str, str],\n tabletype: str = \"detail\",\n estimate: Union[int, str] = \"5\",\n varfile: str = \"subject_vars_2019.json\",\n debug: bool = False,\n ):\n # Pulls data from ACS\n if debug:\n print(\"making request...\")\n resp = await self._collect_table(\n tableid=tableid,\n year=year,\n location=location,\n tabletype=tabletype,\n estimate=estimate,\n debug=debug,\n )\n\n if debug:\n print(\"opening JSON...\")\n # Opens the JSON file with subject tables info\n\n with pkg_resources.open_text(tableids, varfile) as f:\n subjectDict = json.load(f)\n\n # ids: list of subject ids\n # vals: list of corresponding values\n if debug:\n print(\"post-processing....\")\n ids, vals = resp[0], resp[1]\n concept_label = []\n values = []\n\n # state_decoding = bidict({k.fips: k.abbr for k in us.states.STATES})\n location_names = fips2name(location)\n\n subjectDict = subjectDict[\"variables\"]\n\n for idx, id in enumerate(ids):\n subject = id\n # Search for the subject ids in our JSON file\n # try/catch so we only query query-able fields in the JSON\n try:\n concept_label.append(\n (\n subjectDict[subject][\"concept\"]\n + \" \"\n + subjectDict[subject][\"label\"]\n ).replace(\"!!\", \" \")\n )\n values.append(vals[idx])\n except KeyError:\n continue\n\n # Intermediate output DF\n subject_df = pd.DataFrame(\n {\"concept_label\": concept_label, 
\"values\": values, \"year\": year}\n )\n\n # Drop duplicates\n subject_df.drop_duplicates(inplace=True, subset=[\"concept_label\"])\n\n # Final DF that can be merged\n acs_subject_pivoted = subject_df.pivot(\n index=\"year\", columns=\"concept_label\", values=\"values\"\n )\n\n acs_subject_pivoted.drop(acs_subject_pivoted.columns[0], axis=1, inplace=True)\n\n location_str = \"\"\n\n if not location: # If the location is empty\n location_str = \"us\"\n\n for key, value in location_names.items():\n if key.lower() != \"state\":\n value = value.split(\",\")[0]\n acs_subject_pivoted[key.lower()] = value.lower()\n location_str += (\n value.lower() if len(location_str) == 0 else \" \" + value.lower()\n )\n\n acs_subject_pivoted[\"location_key\"] = location_str\n\n return acs_subject_pivoted\n\n async def _tables_range(\n self,\n tableid: str,\n location: Dict[str, str],\n start_year: Union[int, str] = \"2015\",\n end_year: Union[int, str] = \"2019\",\n tabletype: str = \"detail\",\n varfile: str = \"subject_vars_2019.json\",\n estimate: Union[int, str] = \"5\",\n debug: bool = False,\n ):\n \"\"\"Helper function to get multiple years of ACS data for a single subject and return them as a single dataframe\"\"\"\n year_range = range(int(start_year), int(end_year) + 1)\n\n if isinstance(location, dict): # If there is only one location passed\n # Clean location\n if \"city\" in location.keys():\n if len(location[\"city\"]) == 7:\n if \"state\" not in location.keys():\n location[\"state\"] = location[\"city\"][\n 0:2\n ] # Add the state code to the state key\n location[\"city\"] = location[\"city\"][2:] # shave off the state code\n if \"county\" in location.keys(): # Clean the county\n if \"_\" in location[\"county\"]:\n splt = location[\"county\"].split(\"_\")\n location[\"county\"] = splt[-1]\n if \"state\" not in location.keys():\n location[\"state\"] = splt[0]\n\n results = await asyncio.gather(\n *[\n self._process_request(\n tableid=tableid,\n year=year,\n location=location,\n tabletype=tabletype,\n estimate=estimate,\n varfile=varfile,\n debug=debug,\n )\n for year in year_range\n ]\n )\n elif isinstance(location, list): # If there is more than one location passed in\n for loc in location:\n if \"city\" in loc.keys():\n if len(loc[\"city\"]) == 7:\n if \"state\" not in loc.keys():\n loc[\"state\"] = loc[\"city\"][\n 0:2\n ] # Add the state code to the state key\n loc[\"city\"] = loc[\"city\"][2:] # shave off the state code\n if \"county\" in loc.keys(): # Clean the county\n if \"_\" in loc[\"county\"]:\n splt = loc[\"county\"].split(\"_\")\n loc[\"county\"] = splt[-1]\n if \"state\" not in loc.keys():\n loc[\"state\"] = splt[0]\n\n results = await asyncio.gather(\n *[\n self._process_request(\n tableid=tableid,\n year=year,\n location=loc,\n tabletype=tabletype,\n estimate=estimate,\n varfile=varfile,\n debug=debug,\n )\n for year in year_range\n for loc in location\n ]\n )\n\n res = pd.concat(results)\n\n return res\n\n async def get_acs(\n self,\n vars: List[str],\n start_year: Union[int, str],\n end_year: Union[int, str],\n location: Union[Dict[str, str], List[Dict[str, str]]],\n translate_location: bool = False,\n tabletype: Union[str, List[str]] = None,\n infer_type: bool = True,\n varfile: Union[str, List[str]] = None,\n estimate: Union[int, str] = \"5\",\n join: bool = False,\n debug: bool = False,\n ):\n \"\"\"get_acs queries the ACS API and gathers data for any subject or data table into pandas dataframes\n\n Parameters\n ----------\n vars : List[str]\n List of tables we want to grab 
from ACS, example [\"S1001\", \"S1501\"]\n start_year : Union[int, str]\n Year we want to start collecting data from, earliest being \"2011\"\n end_year : Union[int, str]\n Last year we want to collect data from, latest being \"2019\". Must be >= year_start\n location : Union[Dict[str, str], List[Dict[str, str]]]\n Dictionary with the following keys to specify location:\n {\n \"state\": str, FIPS code of the state,\n \"msa\": str, code for the MSA,\n \"county\": str, FIPS code for the county,\n \"city\": str, FIPS code for the city of interest\n }\n NOTE: You may also pass a list of location dictionaries -- this is the preferred method, since it will parallelize easily\n translate_location: bool\n Whether or not we want to convert the location dictionary to FIPS codes. This essentially does\n location = lowe.locations.lookups.name2fips(location)\n Note that when passing in a dictionary with name vakues instead of FIPS values, all non-state values must have\n the state attached to it. That is, if I want to query for Palm Springs, I would do {city: \"palm springs, ca\"}\n For safety, always pass strings in as lowercase. Checks are in place for this but they may not be comprehensive\n tabletype : Union[str, List[str]], optional\n Table type to collect, must be one of [\"detail\", \"subject\", \"dprofile\", \"cprofile\"]\n Respectively, these are Detailed Tables, Subject Tables, Data Profiles, and Comparison Profiles\n If there are various types of tables being collected with one call, pass a list of length len(vars)\n Each entry of this list should correspond to the table type of the corresponding entry in\n NOTE: Pass as None if you want to infer the table type\n infer_type: bool, optional\n Whether or not we want to infer table types\n varfile: Union[str, List[str]]\n File (or list of files) that should be used to translate variable names\n NOTE: Pass None (default) if you want to infer the varfile.\n estimate: Union[int,str]\n ACS estimates to gather (1, 3, or 5-year)\n join: bool, optional\n Whether or not to join all the results together into one large table, by default True\n debug: bool, optional\n If True, prints out extra information useful for debugging\n\n Returns\n -------\n pd.DataFrame, List[pd.DataFrame]\n If only one table is called, then returns the dataframe. 
Else, return a list of dataframes\n \"\"\"\n # Split the vars into equal partitions\n if infer_type:\n tabletypes = [self._infer_table_type(var) for var in vars]\n if debug:\n print(tabletypes)\n\n # Translate the dictionary to FIPS values if necessary\n if translate_location:\n location = name2fips(location)\n\n if varfile is None: # We want to infer which file to use\n varfile = [\n self._infer_varfile(\n tabletype=tabletype, year=\"2019\"\n ) # NOTE: This may not work in later years\n for tabletype in tabletypes\n ]\n varfile = varfile[0] if len(varfile) == 1 else varfile\n\n if isinstance(varfile, str):\n dfs = await asyncio.gather(\n *[\n self._tables_range(\n tableid=table,\n start_year=start_year,\n end_year=end_year,\n location=location,\n tabletype=tabletypes[0],\n estimate=estimate,\n varfile=varfile,\n debug=debug,\n )\n for table in vars\n ]\n )\n elif isinstance(varfile, list):\n dfs = await asyncio.gather(\n *[\n self._tables_range(\n tableid=table,\n start_year=start_year,\n end_year=end_year,\n location=location,\n tabletype=tabletypes[i],\n varfile=varfile[i],\n estimate=estimate,\n debug=debug,\n )\n for i, table in enumerate(vars)\n ]\n )\n\n if join:\n # Iterate through the dfs and join them together on 'year'\n base = dfs[0]\n for df in dfs[1:]:\n intermediate = base.join(df, how=\"left\", on=[\"year\", \"state\"])\n return intermediate\n\n else:\n return dfs[0] if len(dfs) == 1 else dfs\n\n\n\"\"\"\nasync def main():\n subjects = [\"S2701\"]\n # dp = \"DP05\"\n # PALM_SPRINGS = \"55254\"\n # RANCHO_MIRAGE = \"59500\"\n # STATE = \"06\"\n\n client = ACSClient()\n await client.initialize()\n\n # locs = [{\"state\": str(st.fips)} for st in us.states.STATES]\n\n # locs = [{\"state\": \"06\"}, {\"state\": \"04\"}]\n\n responses = await client.get_acs(\n vars=subjects,\n start_year=\"2017\",\n end_year=\"2019\",\n location={},\n infer_type=True,\n estimate=\"5\",\n join=False,\n debug=True,\n )\n\n print(responses[\"location_key\"])\n\n await client.close()\n\n return responses\n\n\nasyncio.run(main())\n\"\"\"\n"
] |
[
[
"pandas.concat",
"pandas.DataFrame"
]
] |
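A hedged call sketch for the ACSClient shown above; the import path, table id, FIPS code, and year range are illustrative, and it assumes an ACS API key is present as API_KEY_ACS in a local .env file.

import asyncio
from lowe.acs.ACSClient import ACSClient  # assumed import path matching lowe/acs/ACSClient.py

async def demo():
    client = ACSClient()       # reads API_KEY_ACS from the environment
    await client.initialize()  # opens the aiohttp session
    try:
        df = await client.get_acs(
            vars=["S1501"],            # illustrative subject table id
            start_year="2018",
            end_year="2019",
            location={"state": "06"},  # state-level query, FIPS 06 (California)
            estimate="5",
            join=False,
        )
        print(df.head())
    finally:
        await client.close()

asyncio.run(demo())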
FLY-CODE77/opencv
|
[
"5644e6c1ef43d81efb54ccde6c06f1adf000fb96"
] |
[
"TIL/gradient_edge.py"
] |
[
"# edge 검출 with gradient 크기를 사용 \n\nimport sys\nimport numpy as np\nimport cv2\n\nsrc = cv2.imread('HappyFish.jpg', cv2.IMREAD_GRAYSCALE)\n\nif src is None:\n print('error')\n sys.exit()\n\nkernel = np.array([[-1, 0 , 1], [-2,0,2], [-1,0,1]], dtype = np.float32)\n\n# sobel 안쓰고!\ndx = cv2.filter2D(src, cv2.CV_32F, kernel )\n\n# sobel 를 사용하면\ndy = cv2.Sobel(src, cv2.CV_32F, 0, 1 )\n\n# 그라이디언트 크기 계산 float 값 인자들을 받아서! float 형으로 변환 시켜준다 \nmag = cv2.magnitude(dx,dy)\n\n# 크기는 255 보다 커질수 있으니깐.. clip 을 시킨다 \nmag = np.clip(mag,0,255).astype(np.uint8)\n\n\nedge = np.zeros(mag.shape[:2], np.uint8)\n# 여기서 >120 이란 값을 잘 조절하면서 하면 찾고자 하는 형태의 엣지 값을 파악 가능하다\nedge[mag> 120] = 255\n\ncv2.imshow('src', src)\ncv2.imshow('mag', mag)\ncv2.imshow('edge', edge)\n\ncv2.waitKey()\n\ncv2.destroyAllWindows()"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.clip"
]
] |
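As a companion sketch (not part of the original repo), the same gradient-magnitude edge map can be built with cv2.Sobel in both directions and written to disk instead of shown in a window; 'HappyFish.jpg' and the 120 threshold simply mirror the values the script above uses.

import sys
import cv2
import numpy as np

src = cv2.imread('HappyFish.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
    sys.exit('could not read image')

# Sobel derivatives along x and y, kept in float32 to avoid clipping negative values.
dx = cv2.Sobel(src, cv2.CV_32F, 1, 0)
dy = cv2.Sobel(src, cv2.CV_32F, 0, 1)

mag = cv2.magnitude(dx, dy)
mag = np.clip(mag, 0, 255).astype(np.uint8)

edge = np.zeros_like(mag)
edge[mag > 120] = 255  # raise or lower the threshold to keep stronger or weaker edges

cv2.imwrite('edge.png', edge)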
Azure/LearnAI-CustomComputerVisionwithAML
|
[
"ce78cc0176ddc69b8857e603dd334e7732b2e1cc"
] |
[
"lab02.4-deployment/resources/score_aml.py"
] |
[
"import base64\nimport json\nimport numpy as np\nimport requests\nimport sys, traceback\nimport timeit\nfrom io import BytesIO\nfrom PIL import Image\n\"\"\"\nSample script to score a published AML webservice directly on test images.\n\"\"\"\n\ndef get_class(class_index):\n class_map = dict()\n class_map[0] = 'bowl'\n class_map[1] = 'cup'\n class_map[2] = 'cutlery'\n class_map[3] = 'plate'\n if class_index in class_map.keys():\n return class_map[class_index]\n\ndef score_service_endpoint_with_images(images, service_endpoint_url, parameters ={}, service_key=None):\n \"\"\"Score image list against a service endpoint\n\n Args:\n images(list): list of (input image file path)\n service_endpoint_url(str): endpoint url\n service_key(str): service key, None for local deployment.\n parameters(dict): service additional paramters in dictionary\n image_resize_dims(list or tuple): resize image if provided. Format: [width, height].\n \n Returns:\n result (list): list of result for each image\n \"\"\"\n routing_id = \"\"\n if service_key is None:\n headers = {'Content-Type': 'application/json',\n 'X-Marathon-App-Id': routing_id}\n else:\n headers = {'Content-Type': 'application/json',\n \"Authorization\": ('Bearer ' + service_key), 'X-Marathon-App-Id': routing_id}\n payload = []\n for image in images:\n encoded = None\n img = Image.open(image).convert('RGB')\n image_buffer = BytesIO()\n img.save(image_buffer, format=\"png\")\n encoded = base64.b64encode(image_buffer.getvalue())\n image_request = {\"image_in_base64\": \"{0}\".format(encoded), \"parameters\": parameters}\n payload.append(image_request)\n body = json.dumps(payload)\n r = requests.post(service_endpoint_url, data=body, headers=headers)\n try:\n result = json.loads(r.text)\n except:\n raise ValueError(\"Incorrect output format. Result cant not be parsed: \" +r.text)\n return result\n\n# Score images on disk using deployed endpoint. \ndef main():\n service_endpoint_url = \"http://40.84.40.11/api/v1/service/testdeployment/score\" # Please replace this with your service endpoint url\n service_key = \"73246bc47340467e97915fb2aed7c6d7\" # Please replace this with your service key\n parameters = {}\n test_images = [\n '../sample_data/imgs_recycling/cup/msft-plastic-cup20170725135025957.jpg',\n '../sample_data/imgs_recycling/cup/msft-plastic-cup20170725135335923.jpg',\n '../sample_data/imgs_recycling/cup/msft-plastic-cup20170725135216711.jpg',\n ]\n\n for img in test_images:\n tic = timeit.default_timer()\n return_json = score_service_endpoint_with_images([img], service_endpoint_url, parameters =parameters, service_key=service_key)[0]\n print('Scoring image {}'.format(img))\n print(\" Time for scoring call: {:.2f} seconds\".format(timeit.default_timer() - tic))\n # parse returned json string\n result = json.loads(return_json)\n class_index = np.argmax(np.array(result))\n print(return_json)\n print(\"classified label: {}\".format(get_class(class_index)))\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.array"
]
] |
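A minimal client-side sketch for the scoring helper above; the module name, endpoint URL, key, and image path are placeholders to be replaced with your own deployment values.

import json
import numpy as np
from score_aml import score_service_endpoint_with_images, get_class  # assumed module name

endpoint = "http://<service-ip>/api/v1/service/<deployment>/score"  # placeholder
key = "<service-key>"                                               # placeholder

raw = score_service_endpoint_with_images(
    ["my_test_image.jpg"], endpoint, parameters={}, service_key=key
)[0]
scores = np.array(json.loads(raw))        # one score per class
print(get_class(int(np.argmax(scores))))  # e.g. 'cup'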
52North/MariDataHarvest
|
[
"e8bed1802ed19fb242d10d0e592cd6dde4c54544"
] |
[
"EnvironmentalData/env_data_validator.py"
] |
[
"import logging\nimport random\nimport sys\nimport pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom utilities import helper_functions\nfrom EnvironmentalData.weather import get_GFS, get_global_wave, get_global_phy_daily, get_global_wind, select_grid_point\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate_random_rows(df: pd.DataFrame, num_of_rows=10):\n df = df.dropna(how='all').fillna(value=0)\n gfs, wave, phy, wind = [], [], [], []\n for i in range(num_of_rows):\n if len(df) > 1:\n rand_row = df.loc[[random.randint(0, len(df) - 1)]]\n lat = rand_row.LAT.values[0]\n lon = rand_row.LON.values[0]\n date = pd.to_datetime(str(rand_row.BaseDateTime.values[0])).replace(tzinfo=None)\n logger.debug('Validating the interpolation of AIS point with LAT = %s, LON = %s, Timestamp = %s' % (\n str(lat), str(lon), str(date)))\n gfs_data = select_grid_point(*get_GFS(date, date, lat, lat, lon, lon), date, lat, lon)\n gfs.append(sum(cosine_similarity(rand_row[gfs_data.columns].values, gfs_data.values))[0])\n wave_data = select_grid_point(*get_global_wave(date, date, lat, lat, lon, lon), date, lat, lon)\n wave.append(sum(cosine_similarity(rand_row[wave_data.columns].values, wave_data.values))[0])\n\n phy_data = select_grid_point(*get_global_phy_daily(date, date, lat, lat, lon, lon), date, lat, lon)\n phy.append(sum(cosine_similarity(rand_row[phy_data.columns].values, phy_data.values))[0])\n wind_data = select_grid_point(*get_global_wind(date, date, lat, lat, lon, lon), date, lat, lon)\n wind.append(sum(cosine_similarity(rand_row[wind_data.columns].values, wind_data.values))[0])\n logger.debug('Avg. Similarity gfs %.2f' % sum(gfs) / len(gfs))\n logger.debug('Avg. Similarity wave %.2f' % sum(wave) / len(wave))\n logger.debug('Avg. Similarity wind %.2f' % sum(wind) / len(wind))\n logger.debug('Avg. Similarity phy %.2f' % sum(phy) / len(phy))\n\n\nif __name__ == '__main__':\n validate_random_rows(\n pd.read_csv(sys.argv[1], parse_dates=['BaseDateTime'], date_parser=helper_functions.str_to_date))\n"
] |
[
[
"pandas.read_csv",
"sklearn.metrics.pairwise.cosine_similarity"
]
] |
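A small driver sketch for validate_random_rows above, assuming an AIS CSV with LAT, LON, and BaseDateTime columns and that the package is importable as EnvironmentalData.env_data_validator.

import pandas as pd
from utilities import helper_functions                                 # same helper the module itself uses
from EnvironmentalData.env_data_validator import validate_random_rows  # assumed import path

df = pd.read_csv(
    "ais_sample.csv",  # illustrative file name
    parse_dates=["BaseDateTime"],
    date_parser=helper_functions.str_to_date,
)
validate_random_rows(df, num_of_rows=5)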
egorzakharov/pytorch3d
|
[
"ef21a6f6aaeae499f60af9eb3e57ba41040aac11"
] |
[
"pytorch3d/io/obj_io.py"
] |
[
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"This module implements utility functions for loading and saving meshes.\"\"\"\nimport os\nimport warnings\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport numpy as np\nimport torch\nfrom iopath.common.file_io import PathManager\nfrom PIL import Image\nfrom pytorch3d.common.types import Device\nfrom pytorch3d.io.mtl_io import load_mtl, make_mesh_texture_atlas\nfrom pytorch3d.io.utils import PathOrStr, _check_faces_indices, _make_tensor, _open_file\nfrom pytorch3d.renderer import TexturesAtlas, TexturesUV\nfrom pytorch3d.structures import Meshes, join_meshes_as_batch\n\nfrom .pluggable_formats import MeshFormatInterpreter, endswith\n\n\n# Faces & Aux type returned from load_obj function.\n_Faces = namedtuple(\"Faces\", \"verts_idx normals_idx textures_idx materials_idx\")\n_Aux = namedtuple(\n \"Properties\", \"normals verts_uvs material_colors texture_images texture_atlas\"\n)\n\n\ndef _format_faces_indices(faces_indices, max_index: int, device, pad_value=None):\n \"\"\"\n Format indices and check for invalid values. Indices can refer to\n values in one of the face properties: vertices, textures or normals.\n See comments of the load_obj function for more details.\n\n Args:\n faces_indices: List of ints of indices.\n max_index: Max index for the face property.\n pad_value: if any of the face_indices are padded, specify\n the value of the padding (e.g. -1). This is only used\n for texture indices indices where there might\n not be texture information for all the faces.\n\n Returns:\n faces_indices: List of ints of indices.\n\n Raises:\n ValueError if indices are not in a valid range.\n \"\"\"\n faces_indices = _make_tensor(\n faces_indices, cols=3, dtype=torch.int64, device=device\n )\n\n if pad_value is not None:\n # pyre-fixme[28]: Unexpected keyword argument `dim`.\n mask = faces_indices.eq(pad_value).all(dim=-1)\n\n # Change to 0 based indexing.\n faces_indices[(faces_indices > 0)] -= 1\n\n # Negative indexing counts from the end.\n faces_indices[(faces_indices < 0)] += max_index\n\n if pad_value is not None:\n # pyre-fixme[61]: `mask` is undefined, or not always defined.\n faces_indices[mask] = pad_value\n\n return _check_faces_indices(faces_indices, max_index, pad_value)\n\n\ndef load_obj(\n f,\n load_textures: bool = True,\n create_texture_atlas: bool = False,\n texture_atlas_size: int = 4,\n texture_wrap: Optional[str] = \"repeat\",\n device: Device = \"cpu\",\n path_manager: Optional[PathManager] = None,\n):\n \"\"\"\n Load a mesh from a .obj file and optionally textures from a .mtl file.\n Currently this handles verts, faces, vertex texture uv coordinates, normals,\n texture images and material reflectivity values.\n\n Note .obj files are 1-indexed. The tensors returned from this function\n are 0-indexed. 
OBJ spec reference: http://www.martinreddy.net/gfx/3d/OBJ.spec\n\n Example .obj file format:\n ::\n # this is a comment\n v 1.000000 -1.000000 -1.000000\n v 1.000000 -1.000000 1.000000\n v -1.000000 -1.000000 1.000000\n v -1.000000 -1.000000 -1.000000\n v 1.000000 1.000000 -1.000000\n vt 0.748573 0.750412\n vt 0.749279 0.501284\n vt 0.999110 0.501077\n vt 0.999455 0.750380\n vn 0.000000 0.000000 -1.000000\n vn -1.000000 -0.000000 -0.000000\n vn -0.000000 -0.000000 1.000000\n f 5/2/1 1/2/1 4/3/1\n f 5/1/1 4/3/1 2/4/1\n\n The first character of the line denotes the type of input:\n ::\n - v is a vertex\n - vt is the texture coordinate of one vertex\n - vn is the normal of one vertex\n - f is a face\n\n Faces are interpreted as follows:\n ::\n 5/2/1 describes the first vertex of the first triangle\n - 5: index of vertex [1.000000 1.000000 -1.000000]\n - 2: index of texture coordinate [0.749279 0.501284]\n - 1: index of normal [0.000000 0.000000 -1.000000]\n\n If there are faces with more than 3 vertices\n they are subdivided into triangles. Polygonal faces are assumed to have\n vertices ordered counter-clockwise so the (right-handed) normal points\n out of the screen e.g. a proper rectangular face would be specified like this:\n ::\n 0_________1\n | |\n | |\n 3 ________2\n\n The face would be split into two triangles: (0, 2, 1) and (0, 3, 2),\n both of which are also oriented counter-clockwise and have normals\n pointing out of the screen.\n\n Args:\n f: A file-like object (with methods read, readline, tell, and seek),\n a pathlib path or a string containing a file name.\n load_textures: Boolean indicating whether material files are loaded\n create_texture_atlas: Bool, If True a per face texture map is created and\n a tensor `texture_atlas` is also returned in `aux`.\n texture_atlas_size: Int specifying the resolution of the texture map per face\n when `create_texture_atlas=True`. A (texture_size, texture_size, 3)\n map is created per face.\n texture_wrap: string, one of [\"repeat\", \"clamp\"]. This applies when computing\n the texture atlas.\n If `texture_mode=\"repeat\"`, for uv values outside the range [0, 1] the integer part\n is ignored and a repeating pattern is formed.\n If `texture_mode=\"clamp\"` the values are clamped to the range [0, 1].\n If None, then there is no transformation of the texture values.\n device: Device (as str or torch.device) on which to return the new tensors.\n path_manager: optionally a PathManager object to interpret paths.\n\n Returns:\n 6-element tuple containing\n\n - **verts**: FloatTensor of shape (V, 3).\n - **faces**: NamedTuple with fields:\n - verts_idx: LongTensor of vertex indices, shape (F, 3).\n - normals_idx: (optional) LongTensor of normal indices, shape (F, 3).\n - textures_idx: (optional) LongTensor of texture indices, shape (F, 3).\n This can be used to index into verts_uvs.\n - materials_idx: (optional) List of indices indicating which\n material the texture is derived from for each face.\n If there is no material for a face, the index is -1.\n This can be used to retrieve the corresponding values\n in material_colors/texture_images after they have been\n converted to tensors or Materials/Textures data\n structures - see textures.py and materials.py for\n more info.\n - **aux**: NamedTuple with fields:\n - normals: FloatTensor of shape (N, 3)\n - verts_uvs: FloatTensor of shape (T, 2), giving the uv coordinate per\n vertex. If a vertex is shared between two faces, it can have\n a different uv value for each instance. 
Therefore it is\n possible that the number of verts_uvs is greater than\n num verts i.e. T > V.\n vertex.\n - material_colors: if `load_textures=True` and the material has associated\n properties this will be a dict of material names and properties of the form:\n\n .. code-block:: python\n\n {\n material_name_1: {\n \"ambient_color\": tensor of shape (1, 3),\n \"diffuse_color\": tensor of shape (1, 3),\n \"specular_color\": tensor of shape (1, 3),\n \"shininess\": tensor of shape (1)\n },\n material_name_2: {},\n ...\n }\n\n If a material does not have any properties it will have an\n empty dict. If `load_textures=False`, `material_colors` will None.\n\n - texture_images: if `load_textures=True` and the material has a texture map,\n this will be a dict of the form:\n\n .. code-block:: python\n\n {\n material_name_1: (H, W, 3) image,\n ...\n }\n If `load_textures=False`, `texture_images` will None.\n - texture_atlas: if `load_textures=True` and `create_texture_atlas=True`,\n this will be a FloatTensor of the form: (F, texture_size, textures_size, 3)\n If the material does not have a texture map, then all faces\n will have a uniform white texture. Otherwise `texture_atlas` will be\n None.\n \"\"\"\n data_dir = \"./\"\n if isinstance(f, (str, bytes, Path)):\n data_dir = os.path.dirname(f)\n if path_manager is None:\n path_manager = PathManager()\n with _open_file(f, path_manager, \"r\") as f:\n return _load_obj(\n f,\n data_dir=data_dir,\n load_textures=load_textures,\n create_texture_atlas=create_texture_atlas,\n texture_atlas_size=texture_atlas_size,\n texture_wrap=texture_wrap,\n path_manager=path_manager,\n device=device,\n )\n\n\ndef load_objs_as_meshes(\n files: list,\n device: Optional[Device] = None,\n load_textures: bool = True,\n create_texture_atlas: bool = False,\n texture_atlas_size: int = 4,\n texture_wrap: Optional[str] = \"repeat\",\n path_manager: Optional[PathManager] = None,\n):\n \"\"\"\n Load meshes from a list of .obj files using the load_obj function, and\n return them as a Meshes object. This only works for meshes which have a\n single texture image for the whole mesh. See the load_obj function for more\n details. material_colors and normals are not stored.\n\n Args:\n files: A list of file-like objects (with methods read, readline, tell,\n and seek), pathlib paths or strings containing file names.\n device: Desired device of returned Meshes. 
Default:\n uses the current device for the default tensor type.\n load_textures: Boolean indicating whether material files are loaded\n create_texture_atlas, texture_atlas_size, texture_wrap: as for load_obj.\n path_manager: optionally a PathManager object to interpret paths.\n\n Returns:\n New Meshes object.\n \"\"\"\n mesh_list = []\n for f_obj in files:\n verts, faces, aux = load_obj(\n f_obj,\n load_textures=load_textures,\n create_texture_atlas=create_texture_atlas,\n texture_atlas_size=texture_atlas_size,\n texture_wrap=texture_wrap,\n path_manager=path_manager,\n )\n tex = None\n if create_texture_atlas:\n # TexturesAtlas type\n tex = TexturesAtlas(atlas=[aux.texture_atlas.to(device)])\n else:\n # TexturesUV type\n tex_maps = aux.texture_images\n if tex_maps is not None and len(tex_maps) > 0:\n verts_uvs = aux.verts_uvs.to(device) # (V, 2)\n faces_uvs = faces.textures_idx.to(device) # (F, 3)\n image = list(tex_maps.values())[0].to(device)[None]\n tex = TexturesUV(\n verts_uvs=[verts_uvs], faces_uvs=[faces_uvs], maps=image\n )\n\n mesh = Meshes(\n verts=[verts.to(device)], faces=[faces.verts_idx.to(device)], textures=tex\n )\n mesh_list.append(mesh)\n if len(mesh_list) == 1:\n return mesh_list[0]\n return join_meshes_as_batch(mesh_list)\n\n\nclass MeshObjFormat(MeshFormatInterpreter):\n def __init__(self) -> None:\n self.known_suffixes = (\".obj\",)\n\n def read(\n self,\n path: PathOrStr,\n include_textures: bool,\n device: Device,\n path_manager: PathManager,\n create_texture_atlas: bool = False,\n texture_atlas_size: int = 4,\n texture_wrap: Optional[str] = \"repeat\",\n **kwargs,\n ) -> Optional[Meshes]:\n if not endswith(path, self.known_suffixes):\n return None\n mesh = load_objs_as_meshes(\n files=[path],\n device=device,\n load_textures=include_textures,\n create_texture_atlas=create_texture_atlas,\n texture_atlas_size=texture_atlas_size,\n texture_wrap=texture_wrap,\n path_manager=path_manager,\n )\n return mesh\n\n def save(\n self,\n data: Meshes,\n path: PathOrStr,\n path_manager: PathManager,\n binary: Optional[bool],\n decimal_places: Optional[int] = None,\n **kwargs,\n ) -> bool:\n if not endswith(path, self.known_suffixes):\n return False\n\n verts = data.verts_list()[0]\n faces = data.faces_list()[0]\n save_obj(\n f=path,\n verts=verts,\n faces=faces,\n decimal_places=decimal_places,\n path_manager=path_manager,\n )\n return True\n\n\ndef _parse_face(\n line,\n tokens,\n material_idx,\n faces_verts_idx,\n faces_normals_idx,\n faces_textures_idx,\n faces_materials_idx,\n) -> None:\n face = tokens[1:]\n face_list = [f.split(\"/\") for f in face]\n face_verts = []\n face_normals = []\n face_textures = []\n\n for vert_props in face_list:\n # Vertex index.\n face_verts.append(int(vert_props[0]))\n if len(vert_props) > 1:\n if vert_props[1] != \"\":\n # Texture index is present e.g. f 4/1/1.\n face_textures.append(int(vert_props[1]))\n if len(vert_props) > 2:\n # Normal index present e.g. 4/1/1 or 4//1.\n face_normals.append(int(vert_props[2]))\n if len(vert_props) > 3:\n raise ValueError(\n \"Face vertices can only have 3 properties. \\\n Face vert %s, Line: %s\"\n % (str(vert_props), str(line))\n )\n\n # Triplets must be consistent for all vertices in a face e.g.\n # legal statement: f 4/1/1 3/2/1 2/1/1.\n # illegal statement: f 4/1/1 3//1 2//1.\n # If the face does not have normals or textures indices\n # fill with pad value = -1. 
This will ensure that\n # all the face index tensors will have F values where\n # F is the number of faces.\n if len(face_normals) > 0:\n if not (len(face_verts) == len(face_normals)):\n raise ValueError(\n \"Face %s is an illegal statement. \\\n Vertex properties are inconsistent. Line: %s\"\n % (str(face), str(line))\n )\n else:\n face_normals = [-1] * len(face_verts) # Fill with -1\n if len(face_textures) > 0:\n if not (len(face_verts) == len(face_textures)):\n raise ValueError(\n \"Face %s is an illegal statement. \\\n Vertex properties are inconsistent. Line: %s\"\n % (str(face), str(line))\n )\n else:\n face_textures = [-1] * len(face_verts) # Fill with -1\n\n # Subdivide faces with more than 3 vertices.\n # See comments of the load_obj function for more details.\n for i in range(len(face_verts) - 2):\n faces_verts_idx.append((face_verts[0], face_verts[i + 1], face_verts[i + 2]))\n faces_normals_idx.append(\n (face_normals[0], face_normals[i + 1], face_normals[i + 2])\n )\n faces_textures_idx.append(\n (face_textures[0], face_textures[i + 1], face_textures[i + 2])\n )\n faces_materials_idx.append(material_idx)\n\n\ndef _parse_obj(f, data_dir: str):\n \"\"\"\n Load a mesh from a file-like object. See load_obj function for more details\n about the return values.\n \"\"\"\n verts, normals, verts_uvs = [], [], []\n faces_verts_idx, faces_normals_idx, faces_textures_idx = [], [], []\n faces_materials_idx = []\n material_names = []\n mtl_path = None\n\n lines = [line.strip() for line in f]\n\n # startswith expects each line to be a string. If the file is read in as\n # bytes then first decode to strings.\n if lines and isinstance(lines[0], bytes):\n lines = [el.decode(\"utf-8\") for el in lines]\n\n materials_idx = -1\n\n for line in lines:\n tokens = line.strip().split()\n if line.startswith(\"mtllib\"):\n if len(tokens) < 2:\n raise ValueError(\"material file name is not specified\")\n # NOTE: only allow one .mtl file per .obj.\n # Definitions for multiple materials can be included\n # in this one .mtl file.\n mtl_path = line[len(tokens[0]) :].strip() # Take the remainder of the line\n mtl_path = os.path.join(data_dir, mtl_path)\n elif len(tokens) and tokens[0] == \"usemtl\":\n material_name = tokens[1]\n # materials are often repeated for different parts\n # of a mesh.\n if material_name not in material_names:\n material_names.append(material_name)\n materials_idx = len(material_names) - 1\n else:\n materials_idx = material_names.index(material_name)\n elif line.startswith(\"v \"): # Line is a vertex.\n vert = [float(x) for x in tokens[1:4]]\n if len(vert) != 3:\n msg = \"Vertex %s does not have 3 values. Line: %s\"\n raise ValueError(msg % (str(vert), str(line)))\n verts.append(vert)\n elif line.startswith(\"vt \"): # Line is a texture.\n tx = [float(x) for x in tokens[1:3]]\n if len(tx) != 2:\n raise ValueError(\n \"Texture %s does not have 2 values. Line: %s\" % (str(tx), str(line))\n )\n verts_uvs.append(tx)\n elif line.startswith(\"vn \"): # Line is a normal.\n norm = [float(x) for x in tokens[1:4]]\n if len(norm) != 3:\n msg = \"Normal %s does not have 3 values. 
Line: %s\"\n raise ValueError(msg % (str(norm), str(line)))\n normals.append(norm)\n elif line.startswith(\"f \"): # Line is a face.\n # Update face properties info.\n _parse_face(\n line,\n tokens,\n materials_idx,\n faces_verts_idx,\n faces_normals_idx,\n faces_textures_idx,\n faces_materials_idx,\n )\n\n return (\n verts,\n normals,\n verts_uvs,\n faces_verts_idx,\n faces_normals_idx,\n faces_textures_idx,\n faces_materials_idx,\n material_names,\n mtl_path,\n )\n\n\ndef _load_materials(\n material_names: List[str],\n f: Optional[str],\n *,\n data_dir: str,\n load_textures: bool,\n device: Device,\n path_manager: PathManager,\n):\n \"\"\"\n Load materials and optionally textures from the specified path.\n\n Args:\n material_names: a list of the material names found in the .obj file.\n f: path to the material information.\n data_dir: the directory where the material texture files are located.\n load_textures: whether textures should be loaded.\n device: Device (as str or torch.device) on which to return the new tensors.\n path_manager: PathManager object to interpret paths.\n\n Returns:\n material_colors: dict of properties for each material.\n texture_images: dict of material names and texture images.\n \"\"\"\n if not load_textures:\n return None, None\n\n if f is None:\n warnings.warn(\"No mtl file provided\")\n return None, None\n\n if not path_manager.exists(f):\n warnings.warn(f\"Mtl file does not exist: {f}\")\n return None, None\n\n # Texture mode uv wrap\n return load_mtl(\n f,\n material_names=material_names,\n data_dir=data_dir,\n path_manager=path_manager,\n device=device,\n )\n\n\ndef _load_obj(\n f_obj,\n *,\n data_dir: str,\n load_textures: bool = True,\n create_texture_atlas: bool = False,\n texture_atlas_size: int = 4,\n texture_wrap: Optional[str] = \"repeat\",\n path_manager: PathManager,\n device: Device = \"cpu\",\n):\n \"\"\"\n Load a mesh from a file-like object. 
See load_obj function more details.\n Any material files associated with the obj are expected to be in the\n directory given by data_dir.\n \"\"\"\n\n if texture_wrap is not None and texture_wrap not in [\"repeat\", \"clamp\"]:\n msg = \"texture_wrap must be one of ['repeat', 'clamp'] or None, got %s\"\n raise ValueError(msg % texture_wrap)\n\n (\n verts,\n normals,\n verts_uvs,\n faces_verts_idx,\n faces_normals_idx,\n faces_textures_idx,\n faces_materials_idx,\n material_names,\n mtl_path,\n ) = _parse_obj(f_obj, data_dir)\n\n verts = _make_tensor(verts, cols=3, dtype=torch.float32, device=device) # (V, 3)\n normals = _make_tensor(\n normals,\n cols=3,\n dtype=torch.float32,\n device=device,\n ) # (N, 3)\n verts_uvs = _make_tensor(\n verts_uvs,\n cols=2,\n dtype=torch.float32,\n device=device,\n ) # (T, 2)\n\n faces_verts_idx = _format_faces_indices(\n faces_verts_idx, verts.shape[0], device=device\n )\n\n # Repeat for normals and textures if present.\n if len(faces_normals_idx):\n faces_normals_idx = _format_faces_indices(\n faces_normals_idx, normals.shape[0], device=device, pad_value=-1\n )\n if len(faces_textures_idx):\n faces_textures_idx = _format_faces_indices(\n faces_textures_idx, verts_uvs.shape[0], device=device, pad_value=-1\n )\n if len(faces_materials_idx):\n faces_materials_idx = torch.tensor(\n faces_materials_idx, dtype=torch.int64, device=device\n )\n\n texture_atlas = None\n material_colors, texture_images = _load_materials(\n material_names,\n mtl_path,\n data_dir=data_dir,\n load_textures=load_textures,\n path_manager=path_manager,\n device=device,\n )\n\n if material_colors and not material_names:\n # usemtl was not present but single material was present in the .mtl file\n material_names.append(next(iter(material_colors.keys())))\n # replace all -1 by 0 material idx\n if torch.is_tensor(faces_materials_idx):\n faces_materials_idx.clamp_(min=0)\n\n if create_texture_atlas:\n # Using the images and properties from the\n # material file make a per face texture map.\n\n # Create an array of strings of material names for each face.\n # If faces_materials_idx == -1 then that face doesn't have a material.\n idx = faces_materials_idx.cpu().numpy()\n face_material_names = np.array(material_names)[idx] # (F,)\n face_material_names[idx == -1] = \"\"\n\n # Construct the atlas.\n texture_atlas = make_mesh_texture_atlas(\n material_colors,\n texture_images,\n face_material_names,\n faces_textures_idx,\n verts_uvs,\n texture_atlas_size,\n texture_wrap,\n )\n\n faces = _Faces(\n verts_idx=faces_verts_idx,\n normals_idx=faces_normals_idx,\n textures_idx=faces_textures_idx,\n materials_idx=faces_materials_idx,\n )\n aux = _Aux(\n normals=normals if len(normals) else None,\n verts_uvs=verts_uvs if len(verts_uvs) else None,\n material_colors=material_colors,\n texture_images=texture_images,\n texture_atlas=texture_atlas,\n )\n return verts, faces, aux\n\n\ndef save_obj(\n f: PathOrStr,\n verts,\n faces,\n decimal_places: Optional[int] = None,\n path_manager: Optional[PathManager] = None,\n *,\n verts_uvs: Optional[torch.Tensor] = None,\n faces_uvs: Optional[torch.Tensor] = None,\n texture_map: Optional[torch.Tensor] = None,\n) -> None:\n \"\"\"\n Save a mesh to an .obj file.\n\n Args:\n f: File (str or path) to which the mesh should be written.\n verts: FloatTensor of shape (V, 3) giving vertex coordinates.\n faces: LongTensor of shape (F, 3) giving faces.\n decimal_places: Number of decimal places for saving.\n path_manager: Optional PathManager for interpreting f if\n it is a str.\n 
verts_uvs: FloatTensor of shape (V, 2) giving the uv coordinate per vertex.\n faces_uvs: LongTensor of shape (F, 3) giving the index into verts_uvs for\n each vertex in the face.\n texture_map: FloatTensor of shape (H, W, 3) representing the texture map\n for the mesh which will be saved as an image. The values are expected\n to be in the range [0, 1],\n \"\"\"\n if len(verts) and (verts.dim() != 2 or verts.size(1) != 3):\n message = \"'verts' should either be empty or of shape (num_verts, 3).\"\n raise ValueError(message)\n\n if len(faces) and (faces.dim() != 2 or faces.size(1) != 3):\n message = \"'faces' should either be empty or of shape (num_faces, 3).\"\n raise ValueError(message)\n\n if faces_uvs is not None and (faces_uvs.dim() != 2 or faces_uvs.size(1) != 3):\n message = \"'faces_uvs' should either be empty or of shape (num_faces, 3).\"\n raise ValueError(message)\n\n if verts_uvs is not None and (verts_uvs.dim() != 2 or verts_uvs.size(1) != 2):\n message = \"'verts_uvs' should either be empty or of shape (num_verts, 2).\"\n raise ValueError(message)\n\n if texture_map is not None and (texture_map.dim() != 3 or texture_map.size(2) != 3):\n message = \"'texture_map' should either be empty or of shape (H, W, 3).\"\n raise ValueError(message)\n\n if path_manager is None:\n path_manager = PathManager()\n\n save_texture = all([t is not None for t in [faces_uvs, verts_uvs, texture_map]])\n output_path = Path(f)\n\n # Save the .obj file\n with _open_file(f, path_manager, \"w\") as f:\n if save_texture:\n # Add the header required for the texture info to be loaded correctly\n obj_header = \"\\nmtllib {0}.mtl\\nusemtl mesh\\n\\n\".format(output_path.stem)\n f.write(obj_header)\n _save(\n f,\n verts,\n faces,\n decimal_places,\n verts_uvs=verts_uvs,\n faces_uvs=faces_uvs,\n save_texture=save_texture,\n )\n\n # Save the .mtl and .png files associated with the texture\n if save_texture:\n image_path = output_path.with_suffix(\".png\")\n mtl_path = output_path.with_suffix(\".mtl\")\n if isinstance(f, str):\n # Back to str for iopath interpretation.\n image_path = str(image_path)\n mtl_path = str(mtl_path)\n\n # Save texture map to output folder\n # pyre-fixme[16] # undefined attribute cpu\n texture_map = texture_map.detach().cpu() * 255.0\n image = Image.fromarray(texture_map.numpy().astype(np.uint8))\n with _open_file(image_path, path_manager, \"wb\") as im_f:\n image.save(im_f)\n\n # Create .mtl file with the material name and texture map filename\n # TODO: enable material properties to also be saved.\n with _open_file(mtl_path, path_manager, \"w\") as f_mtl:\n lines = f\"newmtl mesh\\n\" f\"map_Kd {output_path.stem}.png\\n\"\n f_mtl.write(lines)\n\n\n# TODO (nikhilar) Speed up this function.\ndef _save(\n f,\n verts,\n faces,\n decimal_places: Optional[int] = None,\n *,\n verts_uvs: Optional[torch.Tensor] = None,\n faces_uvs: Optional[torch.Tensor] = None,\n save_texture: bool = False,\n) -> None:\n\n if len(verts) and (verts.dim() != 2 or verts.size(1) != 3):\n message = \"'verts' should either be empty or of shape (num_verts, 3).\"\n raise ValueError(message)\n\n if len(faces) and (faces.dim() != 2 or faces.size(1) != 3):\n message = \"'faces' should either be empty or of shape (num_faces, 3).\"\n raise ValueError(message)\n\n if not (len(verts) or len(faces)):\n warnings.warn(\"Empty 'verts' and 'faces' arguments provided\")\n return\n\n verts, faces = verts.cpu(), faces.cpu()\n\n lines = \"\"\n\n if len(verts):\n if decimal_places is None:\n float_str = \"%f\"\n else:\n float_str = 
\"%\" + \".%df\" % decimal_places\n\n V, D = verts.shape\n for i in range(V):\n vert = [float_str % verts[i, j] for j in range(D)]\n lines += \"v %s\\n\" % \" \".join(vert)\n\n if save_texture:\n if faces_uvs is not None and (faces_uvs.dim() != 2 or faces_uvs.size(1) != 3):\n message = \"'faces_uvs' should either be empty or of shape (num_faces, 3).\"\n raise ValueError(message)\n\n if verts_uvs is not None and (verts_uvs.dim() != 2 or verts_uvs.size(1) != 2):\n message = \"'verts_uvs' should either be empty or of shape (num_verts, 2).\"\n raise ValueError(message)\n\n # pyre-fixme[16] # undefined attribute cpu\n verts_uvs, faces_uvs = verts_uvs.cpu(), faces_uvs.cpu()\n\n # Save verts uvs after verts\n if len(verts_uvs):\n uV, uD = verts_uvs.shape\n for i in range(uV):\n uv = [float_str % verts_uvs[i, j] for j in range(uD)]\n lines += \"vt %s\\n\" % \" \".join(uv)\n\n if torch.any(faces >= verts.shape[0]) or torch.any(faces < 0):\n warnings.warn(\"Faces have invalid indices\")\n\n if len(faces):\n F, P = faces.shape\n for i in range(F):\n if save_texture:\n # Format faces as {verts_idx}/{verts_uvs_idx}\n face = [\n \"%d/%d\" % (faces[i, j] + 1, faces_uvs[i, j] + 1) for j in range(P)\n ]\n else:\n face = [\"%d\" % (faces[i, j] + 1) for j in range(P)]\n\n if i + 1 < F:\n lines += \"f %s\\n\" % \" \".join(face)\n\n elif i + 1 == F:\n # No newline at the end of the file.\n lines += \"f %s\" % \" \".join(face)\n\n f.write(lines)\n"
] |
[
[
"torch.any",
"numpy.array",
"torch.is_tensor",
"torch.tensor"
]
] |
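A short round-trip sketch for the load_obj/save_obj pair above, assuming the usual pytorch3d.io re-exports; 'model.obj' is a placeholder path and textures are skipped.

from pytorch3d.io import load_obj, save_obj  # assumed public re-exports of the module above

# Load geometry only (no .mtl lookup); faces is a namedtuple of index tensors.
verts, faces, aux = load_obj("model.obj", load_textures=False)  # placeholder path
print(verts.shape, faces.verts_idx.shape)

# Write the geometry back out with 6 decimal places per coordinate.
save_obj("model_copy.obj", verts=verts, faces=faces.verts_idx, decimal_places=6)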
fpsawicki/MLOps_Transformers
|
[
"988c73cd8d2d9b0f58c2a7d9ee03cdf12983c547"
] |
[
"src/models/lightning/train_model_lit.py"
] |
[
"import pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\n\nfrom src.data.make_dataset_lit_wrapper import DBPediaDataModule\nfrom src.models.lightning.model_wrapper_lit import LightningTextNet\nfrom src.paths import MODELS_PATH\n\n\ndef train_model(config: dict):\n model = LightningTextNet(\n config['model']['layers'],\n config['model']['dropout'],\n config['model']['lr'],\n config['model']['optimizer']\n )\n datamodule = DBPediaDataModule(config)\n checkpoint = ModelCheckpoint(\n monitor='val_acc_epoch',\n dirpath=str(MODELS_PATH),\n filename=f\"{config['model']['name']}\",\n save_top_k=1\n )\n early_stopping = EarlyStopping(monitor='val_acc_epoch')\n logger = WandbLogger(name=config['model']['name'], project='mlops_distilbert')\n epochs = int(config['model']['epochs'])\n trainer = pl.Trainer(\n gpus=-1 if torch.cuda.is_available() else None,\n logger=logger,\n min_epochs=epochs // 2,\n max_epochs=epochs,\n callbacks=[early_stopping, checkpoint]\n )\n trainer.fit(model, datamodule=datamodule)\n"
] |
[
[
"torch.cuda.is_available"
]
] |
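An illustrative config and call for train_model above; every value is a placeholder, the nested keys simply mirror the lookups the function performs, and DBPediaDataModule is assumed to read any extra keys it needs from the same dict.

from src.models.lightning.train_model_lit import train_model  # assumed import path

config = {
    "model": {
        "name": "distilbert_textnet_demo",  # placeholder run/checkpoint name
        "layers": [768, 256, 14],           # placeholder layer sizes
        "dropout": 0.2,
        "lr": 1e-4,
        "optimizer": "adam",
        "epochs": 4,
    },
    # any data-loading keys expected by DBPediaDataModule(config) would go here
}

train_model(config)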
HuynhThanhQuan/graph-network
|
[
"e429a641e7baecad9765700cac580cfbdedbe1bd"
] |
[
"core/graph_evaluation.py"
] |
[
"import pandas as pd\nfrom graph import util\nfrom graph.core import constraint\n\n\nclass Evaluation:\n def __init__(self, graph_parser):\n self.graph_parser = graph_parser\n self.num_error_logs = 0\n self.num_unique_stacktrace = 0\n self.percent_unique_stacktrace = 0\n self.unique_stacktrace = None\n self.match_exactly = False\n self.representation = None\n\n def fit(self, error_logs):\n self.num_error_logs = len(error_logs)\n assert self.num_error_logs > 0, \"No input data found\"\n list_stacktrace, valid_indices, exclude = self.graph_parser.validate(error_logs)\n assert len(list_stacktrace) > 0, \"Input Data is not valid\"\n self.unique_stacktrace = set([' '.join(stacktrace) for stacktrace in list_stacktrace])\n self.num_unique_stacktrace = len(self.unique_stacktrace)\n self.percent_unique_stacktrace = self.num_unique_stacktrace / self.num_error_logs\n self.match_exactly = self.num_unique_stacktrace == 1\n self.representation = util.find_stacktrace_representation(list_stacktrace)\n\n @staticmethod\n def generate_evaluation_report(graph_parser, valid_error_logs, cluster_report):\n clusterid_docids_map = cluster_report.clusterid_docids_map\n clusterid_evaluation_map = {}\n for clusterid, docids in clusterid_docids_map.items():\n error_log_ids = cluster_report.clusterid_errorlogids_map[clusterid] if \\\n cluster_report.clusterid_errorlogids_map is not None else None\n sub_error_logs = [valid_error_logs[docid] for docid in docids]\n evaluation = Evaluation(graph_parser=graph_parser)\n evaluation.fit(pd.Series(sub_error_logs))\n evaluation.docids = docids\n evaluation.error_log_ids = error_log_ids\n clusterid_evaluation_map[clusterid] = evaluation\n return clusterid_evaluation_map\n\n @staticmethod\n def validate(tokens_list):\n result = []\n result_indices = []\n exclude = []\n for i, tokens in enumerate(tokens_list):\n valid_tokens = [token for token in tokens if token not in constraint.Constraint.VALUES]\n if len(valid_tokens) > 0:\n result_indices.append(i)\n result.append(valid_tokens)\n else:\n exclude.append(i)\n return result, result_indices, exclude\n"
] |
[
[
"pandas.Series"
]
] |
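Evaluation.fit above only needs a graph_parser whose validate() returns (token_lists, valid_indices, excluded_indices), plus the package's own graph.util helper for the representation. Assuming the record's Evaluation class and that helper are importable, a hypothetical stub parser is enough to exercise it:

import pandas as pd

class StubParser:
    # Hypothetical parser: tokenize on whitespace and treat every log as valid.
    def validate(self, error_logs):
        tokens = [str(log).split() for log in error_logs]
        return tokens, list(range(len(tokens))), []

ev = Evaluation(graph_parser=StubParser())
ev.fit(pd.Series(["NullPointerException at Foo.bar", "NullPointerException at Foo.bar"]))
print(ev.num_unique_stacktrace, ev.match_exactly)  # 1 True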
schottkey7/deep-reinforcement-learning
|
[
"92c97fadbb5b95caa3fd3813a0757debc2c2747a"
] |
[
"p1_navigation/train.py"
] |
[
"from unityagents import UnityEnvironment\nfrom collections import namedtuple, deque\nfrom time import time\n\nfrom model import QNetwork\nfrom agent import Agent\n\nimport numpy as np\nimport random\n\nimport matplotlib.pyplot as plt\n\nimport torch\n\n\ndef train(\n env,\n agent,\n brain_name,\n n_episodes=2000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.01,\n eps_decay=0.995,\n):\n \"\"\"Deep Q-Learning.\n\n Params\n ======\n env (UnityEnvironment): instantiated unity environment\n agent: agent for the network\n brain_name (str): brain name\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n eps_start (float): starting value of epsilon, for epsilon-greedy action selection\n eps_end (float): minimum value of epsilon\n eps_decay (float): multiplicative factor (per episode) for decreasing epsilon\n \"\"\"\n\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n durations = []\n\n eps = eps_start # initialize epsilon\n for i_ep in range(1, n_episodes + 1):\n\n start = time()\n\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n\n for t in range(max_t):\n action = agent.act(state, eps) # select an action\n env_info = env.step(action)[\n brain_name\n ] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n durations.append(time() - start)\n\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n\n template = \"Episode {}\\tAverage Score: {:.2f}\\tAverage Time: {:.2f}s\"\n\n print(\n (\"\\r\" + template).format(i_ep, np.mean(scores_window), np.mean(durations)),\n end=\"\",\n )\n if i_ep % 100 == 0:\n print(\n (\"\\r\" + template).format(\n i_ep, np.mean(scores_window), np.mean(durations)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint.pth\")\n\n if np.mean(scores_window) > 13.0:\n print(\n \"\\nProject solved in {:d} episodes!\\tAverage Score: {:.2f}\\tAverage Time {:.2f}\".format(\n i_ep - 100, np.mean(scores_window), np.mean(durations)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint.pth\")\n break\n\n if np.mean(scores_window) >= 200.0:\n print(\n \"\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\\tAverage Time {:.2f}\".format(\n i_ep - 100, np.mean(scores_window), np.mean(durations)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint.pth\")\n break\n return scores\n\n\ndef main():\n # Initialise the unity environment\n env = UnityEnvironment(file_name=\"Banana.app\")\n\n # get the default brain\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain_name]\n\n # number of actions\n action_size = brain.vector_action_space_size\n\n # examine the state space\n state = env_info.vector_observations[0]\n state_size = len(state)\n\n print(\"\\n--> Training\\n\")\n\n agent = Agent(state_size=state_size, action_size=action_size, seed=0)\n scores = train(env=env, agent=agent, brain_name=brain_name)\n\n # Plot the scores after 
training\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel(\"Score\")\n plt.xlabel(\"Episode #\")\n plt.savefig(\"training.png\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
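With the defaults above (eps_start=1.0, eps_end=0.01, eps_decay=0.995), epsilon is multiplied by 0.995 once per episode, so the floor is reached after roughly ln(0.01)/ln(0.995), about 919 episodes. A quick check of that arithmetic, replaying the same update rule used in train():

import math

eps_start, eps_end, eps_decay = 1.0, 0.01, 0.995

# Closed form: smallest n with eps_start * eps_decay**n <= eps_end.
print(math.ceil(math.log(eps_end / eps_start) / math.log(eps_decay)))  # 919

# Same result by replaying the per-episode update.
eps, episodes = eps_start, 0
while eps > eps_end:
    eps = max(eps_end, eps_decay * eps)
    episodes += 1
print(episodes)  # 919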
micahprice/codenames_ai
|
[
"97176d584b908ea44410fd29c62770703377c9d4"
] |
[
"operative.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.neighbors import NearestNeighbors\nfrom itertools import permutations\n\nclass OperativeAI:\n\n def __init__(self, embed_module, game_words=None, metric='cosine',):\n self.embed_module = embed_module\n self.metric = metric\n self.game_words = game_words\n\n def _get_embeddings_list(self, game_words):\n with tf.Session() as sess:\n sess.run([tf.global_variables_initializer(), tf.tables_initializer()])\n embeddings = sess.run([self.embed_module(game_words)])[0]\n return np.array(embeddings).tolist()\n\n def _get_nearest(self, hint_embedding, embeddings, game_words, k):\n df = pd.DataFrame(embeddings, index=game_words)\n nn = NearestNeighbors(metric=self.metric).fit(df)\n distance, idx = nn.kneighbors(hint_embedding, n_neighbors=k)\n return distance, idx\n\n def remove_words(self, words):\n if isinstance(words, str): # if one word\n self.game_words.remove(words)\n else:\n for word in words:\n self.game_words.remove(word)\n\n def recommend_guess(hint, game_words=None, k=None):\n if game_words is None:\n game_words = self.game_words\n else:\n self.game_words = game_words\n k = len(game_words) if k is None else k\n\n print('\\nGame Words: {}'.format(game_words))\n print('Hint: {}'.format(hint))\n words = [hint] + game_words\n\n embeddings = self._get_embeddings_list(words)\n hint_embedding, embeddings = embeddings[0], embeddings[1:]\n distance, idx = self._get_nearest(hint_embedding, embeddings, game_words, k)\n print('Recommendations:')\n for i, d in zip(idx, dist):\n print(\"{:>}: {:.4f}\".format(game_words[i], d)) # might need dist[0]\n\n def ngram_recommend_guess(hint, n_gram=2, game_words=None, k=15):\n \"\"\"Ngram is a misnomer here. Oh well.\"\"\"\n # only works with USE\n # use vector addition for Word2Vec?\n if game_words is None:\n game_words = self.game_words\n else:\n self.game_words = game_words\n\n print('\\nGame Words: {}'.format(game_words))\n print('Hint: {}'.format(hint))\n perm = permutations(game_words, n_gram)\n n_grams = [\" \".join(x) for x in perm]\n words = [hint] + n_grams\n\n embeddings = self._get_embeddings_list(words)\n hint_embedding, embeddings = embeddings[0], embeddings[1:]\n distance, idx = self._get_nearest(hint_embedding, embeddings, n_grams, k)\n print('Recommendations:')\n for i, d in zip(idx, dist):\n print(\"{:>}: {:.4f}\".format(game_words[i], d)) # might need dist[0]\n"
] |
[
[
"pandas.DataFrame",
"tensorflow.global_variables_initializer",
"sklearn.neighbors.NearestNeighbors",
"tensorflow.Session",
"numpy.array",
"tensorflow.tables_initializer"
]
] |
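In operative.py above, recommend_guess and ngram_recommend_guess are defined without self even though they use self.game_words and the class's helpers, and the print loop iterates over an undefined name dist (the variable returned by _get_nearest is distance, and kneighbors returns arrays of shape (1, k) for a single query); the n-gram variant also appears to index game_words where the neighbours were fitted on n_grams. A minimal corrected sketch of the lookup-and-print step, keeping the same scikit-learn NearestNeighbors interface and using toy embeddings invented for the example:

import pandas as pd
from sklearn.neighbors import NearestNeighbors

def nearest_report(hint_embedding, embeddings, game_words, k, metric="cosine"):
    # hint_embedding: one embedding; embeddings: one row per game word.
    df = pd.DataFrame(embeddings, index=game_words)
    nn = NearestNeighbors(metric=metric).fit(df)
    distance, idx = nn.kneighbors([hint_embedding], n_neighbors=k)
    # kneighbors returns (1, k) arrays for a single query; take row 0 before zipping.
    for i, d in zip(idx[0], distance[0]):
        print("{:>12}: {:.4f}".format(game_words[i], d))

nearest_report([1.0, 0.0],
               [[0.9, 0.1], [0.0, 1.0], [0.5, 0.5]],
               ["apple", "river", "cloud"], k=2)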
FlyingQianMM/models
|
[
"1fac32c8062dfeaf8e2f69bcaed63bdadff69303"
] |
[
"PaddleCV/PaddleDetection/tools/train.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport numpy as np\nimport datetime\nfrom collections import deque\n\n\ndef set_paddle_flags(**kwargs):\n for key, value in kwargs.items():\n if os.environ.get(key, None) is None:\n os.environ[key] = str(value)\n\n\n# NOTE(paddle-dev): All of these flags should be set before\n# `import paddle`. Otherwise, it would not take any effect.\nset_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n)\n\nfrom paddle import fluid\n\nfrom ppdet.experimental import mixed_precision_context\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.data.data_feed import create_reader\n\nfrom ppdet.utils.cli import print_total_cfg\nfrom ppdet.utils import dist_utils\nfrom ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results\nfrom ppdet.utils.stats import TrainingStats\nfrom ppdet.utils.cli import ArgsParser\nfrom ppdet.utils.check import check_gpu\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.modeling.model_input import create_feed\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n env = os.environ\n FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env\n if FLAGS.dist:\n trainer_id = int(env['PADDLE_TRAINER_ID'])\n import random\n local_seed = (99 + trainer_id)\n random.seed(local_seed)\n np.random.seed(local_seed)\n\n cfg = load_config(FLAGS.config)\n if 'architecture' in cfg:\n main_arch = cfg.architecture\n else:\n raise ValueError(\"'architecture' not specified in config file.\")\n\n merge_config(FLAGS.opt)\n\n if 'log_iter' not in cfg:\n cfg.log_iter = 20\n\n # check if set use_gpu=True in paddlepaddle cpu version\n check_gpu(cfg.use_gpu)\n if not FLAGS.dist or trainer_id == 0:\n print_total_cfg(cfg)\n\n if cfg.use_gpu:\n devices_num = fluid.core.get_cuda_device_count()\n else:\n devices_num = int(os.environ.get('CPU_NUM', 1))\n\n if 'train_feed' not in cfg:\n train_feed = create(main_arch + 'TrainFeed')\n else:\n train_feed = create(cfg.train_feed)\n\n if FLAGS.eval:\n if 'eval_feed' not in cfg:\n eval_feed = create(main_arch + 'EvalFeed')\n else:\n eval_feed = create(cfg.eval_feed)\n\n if 'FLAGS_selected_gpus' in env:\n device_id = int(env['FLAGS_selected_gpus'])\n else:\n device_id = 0\n place = fluid.CUDAPlace(device_id) if cfg.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n lr_builder = create('LearningRate')\n optim_builder = create('OptimizerBuilder')\n\n # build program\n startup_prog = fluid.Program()\n train_prog = fluid.Program()\n with fluid.program_guard(train_prog, startup_prog):\n with fluid.unique_name.guard():\n model = create(main_arch)\n train_pyreader, feed_vars = create_feed(train_feed)\n\n if 
FLAGS.fp16:\n assert (getattr(model.backbone, 'norm_type', None)\n != 'affine_channel'), \\\n '--fp16 currently does not support affine channel, ' \\\n ' please modify backbone settings to use batch norm'\n\n with mixed_precision_context(FLAGS.loss_scale, FLAGS.fp16) as ctx:\n train_fetches = model.train(feed_vars)\n\n loss = train_fetches['loss']\n if FLAGS.fp16:\n loss *= ctx.get_loss_scale_var()\n lr = lr_builder()\n optimizer = optim_builder(lr)\n optimizer.minimize(loss)\n if FLAGS.fp16:\n loss /= ctx.get_loss_scale_var()\n\n # parse train fetches\n train_keys, train_values, _ = parse_fetches(train_fetches)\n train_values.append(lr)\n\n if FLAGS.eval:\n eval_prog = fluid.Program()\n with fluid.program_guard(eval_prog, startup_prog):\n with fluid.unique_name.guard():\n model = create(main_arch)\n eval_pyreader, feed_vars = create_feed(eval_feed)\n fetches = model.eval(feed_vars)\n eval_prog = eval_prog.clone(True)\n\n eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)\n eval_pyreader.decorate_sample_list_generator(eval_reader, place)\n\n # parse eval fetches\n extra_keys = []\n if cfg.metric == 'COCO':\n extra_keys = ['im_info', 'im_id', 'im_shape']\n if cfg.metric == 'VOC':\n extra_keys = ['gt_box', 'gt_label', 'is_difficult']\n if cfg.metric == 'WIDERFACE':\n extra_keys = ['im_id', 'im_shape', 'gt_box']\n eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,\n extra_keys)\n\n # compile program for multi-devices\n build_strategy = fluid.BuildStrategy()\n build_strategy.fuse_all_optimizer_ops = False\n build_strategy.fuse_elewise_add_act_ops = True\n # only enable sync_bn in multi GPU devices\n sync_bn = getattr(model.backbone, 'norm_type', None) == 'sync_bn'\n build_strategy.sync_batch_norm = sync_bn and devices_num > 1 \\\n and cfg.use_gpu\n\n exec_strategy = fluid.ExecutionStrategy()\n # iteration number when CompiledProgram tries to drop local execution scopes.\n # Set it to be 1 to save memory usages, so that unused variables in\n # local execution scopes can be deleted after each iteration.\n exec_strategy.num_iteration_per_drop_scope = 1\n if FLAGS.dist:\n dist_utils.prepare_for_multi_process(exe, build_strategy, startup_prog,\n train_prog)\n exec_strategy.num_threads = 1\n\n exe.run(startup_prog)\n compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(\n loss_name=loss.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\n\n if FLAGS.eval:\n compiled_eval_prog = fluid.compiler.CompiledProgram(eval_prog)\n\n fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'\n\n ignore_params = cfg.finetune_exclude_pretrained_params \\\n if 'finetune_exclude_pretrained_params' in cfg else []\n\n start_iter = 0\n if FLAGS.resume_checkpoint:\n checkpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)\n start_iter = checkpoint.global_step()\n elif cfg.pretrain_weights and fuse_bn and not ignore_params:\n checkpoint.load_and_fusebn(exe, train_prog, cfg.pretrain_weights)\n elif cfg.pretrain_weights:\n checkpoint.load_params(\n exe, train_prog, cfg.pretrain_weights, ignore_params=ignore_params)\n\n train_reader = create_reader(train_feed, (cfg.max_iters - start_iter) *\n devices_num, FLAGS.dataset_dir)\n train_pyreader.decorate_sample_list_generator(train_reader, place)\n\n # whether output bbox is normalized in model output layer\n is_bbox_normalized = False\n if hasattr(model, 'is_bbox_normalized') and \\\n callable(model.is_bbox_normalized):\n is_bbox_normalized = model.is_bbox_normalized()\n\n # 
if map_type not set, use default 11point, only use in VOC eval\n map_type = cfg.map_type if 'map_type' in cfg else '11point'\n\n train_stats = TrainingStats(cfg.log_smooth_window, train_keys)\n train_pyreader.start()\n start_time = time.time()\n end_time = time.time()\n\n cfg_name = os.path.basename(FLAGS.config).split('.')[0]\n save_dir = os.path.join(cfg.save_dir, cfg_name)\n time_stat = deque(maxlen=cfg.log_smooth_window)\n best_box_ap_list = [0.0, 0] #[map, iter]\n\n # use tb-paddle to log data\n if FLAGS.use_tb:\n from tb_paddle import SummaryWriter\n tb_writer = SummaryWriter(FLAGS.tb_log_dir)\n tb_loss_step = 0\n tb_mAP_step = 0\n\n for it in range(start_iter, cfg.max_iters):\n start_time = end_time\n end_time = time.time()\n time_stat.append(end_time - start_time)\n time_cost = np.mean(time_stat)\n eta_sec = (cfg.max_iters - it) * time_cost\n eta = str(datetime.timedelta(seconds=int(eta_sec)))\n outs = exe.run(compiled_train_prog, fetch_list=train_values)\n stats = {k: np.array(v).mean() for k, v in zip(train_keys, outs[:-1])}\n\n # use tb-paddle to log loss\n if FLAGS.use_tb:\n if it % cfg.log_iter == 0:\n for loss_name, loss_value in stats.items():\n tb_writer.add_scalar(loss_name, loss_value, tb_loss_step)\n tb_loss_step += 1\n\n train_stats.update(stats)\n logs = train_stats.log()\n if it % cfg.log_iter == 0 and (not FLAGS.dist or trainer_id == 0):\n strs = 'iter: {}, lr: {:.6f}, {}, time: {:.3f}, eta: {}'.format(\n it, np.mean(outs[-1]), logs, time_cost, eta)\n logger.info(strs)\n\n if (it > 0 and it % cfg.snapshot_iter == 0 or it == cfg.max_iters - 1) \\\n and (not FLAGS.dist or trainer_id == 0):\n save_name = str(it) if it != cfg.max_iters - 1 else \"model_final\"\n checkpoint.save(exe, train_prog, os.path.join(save_dir, save_name))\n\n if FLAGS.eval:\n # evaluation\n results = eval_run(exe, compiled_eval_prog, eval_pyreader,\n eval_keys, eval_values, eval_cls)\n resolution = None\n if 'mask' in results[0]:\n resolution = model.mask_head.resolution\n box_ap_stats = eval_results(\n results, eval_feed, cfg.metric, cfg.num_classes, resolution,\n is_bbox_normalized, FLAGS.output_eval, map_type)\n\n # use tb_paddle to log mAP\n if FLAGS.use_tb:\n tb_writer.add_scalar(\"mAP\", box_ap_stats[0], tb_mAP_step)\n tb_mAP_step += 1\n\n if box_ap_stats[0] > best_box_ap_list[0]:\n best_box_ap_list[0] = box_ap_stats[0]\n best_box_ap_list[1] = it\n checkpoint.save(exe, train_prog,\n os.path.join(save_dir, \"best_model\"))\n logger.info(\"Best test box ap: {}, in iter: {}\".format(\n best_box_ap_list[0], best_box_ap_list[1]))\n\n train_pyreader.reset()\n\n\nif __name__ == '__main__':\n parser = ArgsParser()\n parser.add_argument(\n \"-r\",\n \"--resume_checkpoint\",\n default=None,\n type=str,\n help=\"Checkpoint path for resuming training.\")\n parser.add_argument(\n \"--fp16\",\n action='store_true',\n default=False,\n help=\"Enable mixed precision training.\")\n parser.add_argument(\n \"--loss_scale\",\n default=8.,\n type=float,\n help=\"Mixed precision training loss scale.\")\n parser.add_argument(\n \"--eval\",\n action='store_true',\n default=False,\n help=\"Whether to perform evaluation in train\")\n parser.add_argument(\n \"--output_eval\",\n default=None,\n type=str,\n help=\"Evaluation directory, default is current directory.\")\n parser.add_argument(\n \"-d\",\n \"--dataset_dir\",\n default=None,\n type=str,\n help=\"Dataset path, same as DataFeed.dataset.dataset_dir\")\n parser.add_argument(\n \"--use_tb\",\n type=bool,\n default=False,\n help=\"whether to record the data to 
Tensorboard.\")\n parser.add_argument(\n '--tb_log_dir',\n type=str,\n default=\"tb_log_dir/scalar\",\n help='Tensorboard logging directory for scalar.')\n FLAGS = parser.parse_args()\n main()\n"
] |
[
[
"numpy.array",
"numpy.mean",
"numpy.random.seed"
]
] |
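One detail in the argument parsing above: --use_tb is declared with type=bool, so argparse passes any non-empty string (including "False") through bool(), which yields True. The store_true pattern already used for --fp16 and --eval avoids this; a small self-contained demonstration:

import argparse

p = argparse.ArgumentParser()
p.add_argument("--use_tb", type=bool, default=False)
p.add_argument("--use_tb_flag", action="store_true")  # store_true variant for comparison

print(p.parse_args(["--use_tb", "False"]).use_tb)             # True, because bool("False") is truthy
print(p.parse_args(["--use_tb_flag"]).use_tb_flag)            # True
print(p.parse_args([]).use_tb, p.parse_args([]).use_tb_flag)  # False False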
j-scharrenbach/Trajectron-plus-plus
|
[
"37040ca6e3f386c80ab39fbb4aa9984915c94813"
] |
[
"trajectron/visualization/visualization.py"
] |
[
"from utils import prediction_output_to_trajectories\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.patheffects as pe\nimport numpy as np\nimport seaborn as sns\n\n\ndef plot_trajectories(ax,\n prediction_dict,\n histories_dict,\n futures_dict,\n line_alpha=0.7,\n line_width=0.2,\n edge_width=2,\n circle_edge_width=0.5,\n node_circle_size=0.3,\n batch_num=0,\n kde=False):\n\n cmap = ['k', 'b', 'y', 'g', 'r']\n\n for node in histories_dict:\n history = histories_dict[node]\n future = futures_dict[node]\n predictions = prediction_dict[node]\n\n if np.isnan(history[-1]).any():\n continue\n\n ax.plot(history[:, 0], history[:, 1], 'k--')\n\n for sample_num in range(prediction_dict[node].shape[1]):\n\n if kde and predictions.shape[1] >= 50:\n line_alpha = 0.2\n for t in range(predictions.shape[2]):\n sns.kdeplot(predictions[batch_num, :, t, 0], predictions[batch_num, :, t, 1],\n ax=ax, shade=True, shade_lowest=False,\n color=np.random.choice(cmap), alpha=0.8)\n\n ax.plot(predictions[batch_num, sample_num, :, 0], predictions[batch_num, sample_num, :, 1],\n color=cmap[node.type.value],\n linewidth=line_width, alpha=line_alpha)\n\n ax.plot(future[:, 0],\n future[:, 1],\n 'w--',\n path_effects=[pe.Stroke(linewidth=edge_width, foreground='k'), pe.Normal()])\n\n # Current Node Position\n circle = plt.Circle((history[-1, 0],\n history[-1, 1]),\n node_circle_size,\n facecolor='g',\n edgecolor='k',\n lw=circle_edge_width,\n zorder=3)\n ax.add_artist(circle)\n\n ax.axis('equal')\n\n\ndef visualize_prediction(ax,\n prediction_output_dict,\n dt,\n max_hl,\n ph,\n robot_node=None,\n map=None,\n **kwargs):\n\n prediction_dict, histories_dict, futures_dict = prediction_output_to_trajectories(prediction_output_dict,\n dt,\n max_hl,\n ph,\n map=map)\n\n assert(len(prediction_dict.keys()) <= 1)\n if len(prediction_dict.keys()) == 0:\n return\n ts_key = list(prediction_dict.keys())[0]\n\n prediction_dict = prediction_dict[ts_key]\n histories_dict = histories_dict[ts_key]\n futures_dict = futures_dict[ts_key]\n\n if map is not None:\n ax.imshow(map.as_image(), origin='lower', alpha=0.5)\n plot_trajectories(ax, prediction_dict, histories_dict, futures_dict, *kwargs)\n\n\ndef visualize_distribution(ax,\n prediction_distribution_dict,\n map=None,\n pi_threshold=0.05,\n **kwargs):\n if map is not None:\n ax.imshow(map.as_image(), origin='lower', alpha=0.5)\n\n for node, pred_dist in prediction_distribution_dict.items():\n if pred_dist.mus.shape[:2] != (1, 1):\n return\n\n means = pred_dist.mus.squeeze().cpu().numpy()\n covs = pred_dist.get_covariance_matrix().squeeze().cpu().numpy()\n pis = pred_dist.pis_cat_dist.probs.squeeze().cpu().numpy()\n\n for timestep in range(means.shape[0]):\n for z_val in range(means.shape[1]):\n mean = means[timestep, z_val]\n covar = covs[timestep, z_val]\n pi = pis[timestep, z_val]\n\n if pi < pi_threshold:\n continue\n\n v, w = linalg.eigh(covar)\n v = 2. * np.sqrt(2.) * np.sqrt(v)\n u = w[0] / linalg.norm(w[0])\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan(u[1] / u[0])\n angle = 180. * angle / np.pi # convert to degrees\n ell = patches.Ellipse(mean, v[0], v[1], 180. + angle, color='blue' if node.type.name == 'VEHICLE' else 'orange')\n ell.set_edgecolor(None)\n ell.set_clip_box(ax.bbox)\n ell.set_alpha(pi/10)\n ax.add_artist(ell)\n"
] |
[
[
"matplotlib.patheffects.Normal",
"matplotlib.patches.Ellipse",
"numpy.sqrt",
"numpy.arctan",
"numpy.random.choice",
"numpy.isnan",
"matplotlib.pyplot.Circle",
"scipy.linalg.eigh",
"scipy.linalg.norm",
"matplotlib.patheffects.Stroke"
]
] |
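In visualize_prediction above, the final call forwards its keyword arguments with *kwargs, which unpacks the dict's keys as extra positional arguments instead of passing the values by name; **kwargs is the keyword-forwarding form. A tiny self-contained demonstration (the function and names below are invented for the example, not the record's plotting code):

def show(ax, predictions, histories, futures, line_width=0.2, kde=False):
    print(line_width, kde)

kwargs = {"line_width": 2.0, "kde": True}
show("ax", 1, 2, 3, **kwargs)  # forwards by name: prints 2.0 True
show("ax", 1, 2, 3, *kwargs)   # unpacks the dict KEYS positionally: prints line_width kde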
theowu23451/DRAM
|
[
"5e64e9527dc692ef31049a94fb9861f2038e219c"
] |
[
"model/datasets.py"
] |
[
"import os\nimport posixpath\nimport utils\nimport struct\n\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\n\nfrom config import Config\n\nclass MNIST(object):\n \"\"\"\n Object to download and unzip MNIST dataset\n to local destination. \n\n Call with get_dataset(), returning training\n and testing datasets.\n \"\"\"\n def __init__(self, config):\n self.batch_size = config.batch_size\n self.mnist_folder = config.data_path\n\n def download_mnist(self, path):\n\n utils.make_dir(path)\n\n # file download information\n url = 'http://yann.lecun.com/exdb/mnist'\n\n filenames = [\"train-images-idx3-ubyte.gz\", \n \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\",\n \"t10k-labels-idx1-ubyte.gz\"]\n\n expected_bytes = [9912422, 28881, 1648877, 4542]\n\n for filename, byte in zip(filenames, expected_bytes):\n download_url = posixpath.join(url, filename)\n local_path = os.path.join(path, filename)\n utils.download_file(download_url, local_path, byte, unzip=True)\n\n\n def create_data(self, path, dataset, flatten=True):\n if dataset != 'train' and dataset != 't10k':\n raise NameError(\"not a usable dataset: use 'train' or 't10k'\")\n if not os.path.exists(path):\n raise NameError(\"nonexistent file.\")\n\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, length = struct.unpack(\">II\", file.read(8))\n labels = np.fromfile(file, dtype=np.int8)\n\n \"\"\"For each image, correct labels to 1 and all others to 0\"\"\"\n new_labels = np.zeros((length, 10))\n new_labels[np.arange(length), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, length, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = np.fromfile(file, dtype=np.uint8)\n imgs.reshape(length, rows, cols)\n imgs = imgs.astype(np.float32) / 255.0\n if flatten:\n imgs = imgs.reshape([length, -1])\n\n return (imgs, new_labels)\n\n def read_mnist(self, path, flatten=True, num_training=55000):\n \"\"\" \n Download file, then read file information.\n return tuples of numpy arrays in format \n (imgs, labels).\n\n Split training set based on num_training, leaving rest as validation set.\n \"\"\"\n self.download_mnist(path)\n\n # Process training data\n (imgs, labels) = self.create_data(path, 'train', flatten)\n\n #Shuffle training set.\n indices = np.random.permutation(labels.shape[0])\n train_idx, val_idx = indices[:num_training], indices[num_training:]\n\n train_set, train_labels = imgs[train_idx, :], labels[train_idx, :]\n val_set, val_labels = imgs[val_idx, :], labels[val_idx, :]\n\n # Process test data\n test = self.create_data(path, 't10k', flatten)\n return (train_set, train_labels), (val_set, val_labels), test\n\n def get_dataset(self):\n \"\"\" \n Given batch_size, returns \n Dataset object from mnist data\n with batch sizes of input batch_size\n \"\"\"\n\n train, val, test = self.read_mnist(self.mnist_folder)\n\n # Create tf Datasets for each.\n train_data = utils.convert_to_dataset(train, self.batch_size)\n\n test_data = utils.convert_to_dataset(test, self.batch_size)\n\n return train_data, test_data\n\nclass Camelyon():\n \"\"\"\n Object to read Camelyon16 image data from local destination. \n\n Reads .jpg image files, so original .tif format of WSI must be\n converted to .jpg. Tiling of original image should be done to \n make computation feasible.\n\n Call with get_dataset() to obtain appropraite train and test datasets. 
\n \"\"\"\n def __init__(self, config):\n self.batch_size = config.batch_size\n self.camelyon_folder = config.data_path\n self.img_h = config.height\n self.img_w = config.width\n\n def _parse_function(self, filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_jpeg(image_string)\n image_resized = tf.image.resize_images(image_decoded, [self.img_h, self.img_w])\n return image_resized, label\n\n def _get_label(self, filename):\n if filename.split('_', 1)[0] == 'normal':\n return 0\n elif filename.split('_', 1)[0] == 'tumor':\n return 1\n\n def get_dataset(self):\n data_dir = self.camelyon_folder\n if not os.path.exists(data_dir):\n utils.make_dir(data_dir)\n\n assert os.path.exists(data_dir)\n\n file_list = [f for (_, _, f) in os.walk(data_dir) if f[-4:] == '.jpg']\n label_list = [self._get_label(filename) for filename in file_list]\n\n filenames = tf.constant(file_list)\n labels = tf.constant(label_list)\n\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n dataset = dataset.map(self._parse_function)\n return dataset\n"
] |
[
[
"numpy.fromfile",
"tensorflow.constant",
"tensorflow.read_file",
"tensorflow.image.resize_images",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.arange",
"numpy.random.permutation",
"numpy.zeros",
"tensorflow.image.decode_jpeg"
]
] |
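Two small pitfalls in datasets.py above: imgs.reshape(length, rows, cols) is never assigned (NumPy reshape returns a new array rather than modifying in place), and the comprehension over os.walk binds f to each directory's whole filename list, so the '.jpg' test never matches a name. A corrected sketch of the file-listing step:

import os

def list_jpg_names(data_dir):
    names = []
    for _dirpath, _dirnames, filenames in os.walk(data_dir):
        # Test each filename; in the record, f is bound to the whole filenames list.
        names.extend(n for n in filenames if n.lower().endswith(".jpg"))
    return names

If full paths are wanted for tf.read_file, os.path.join(_dirpath, n) can be collected instead, with _get_label applied to the basename of each path.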
Sharabesh/trading
|
[
"6b58fd675c539818bd89b4a311f6b4a8b7953ccf"
] |
[
"backend/trading/dividends.py"
] |
[
"from config import *\n\npolygon = api.polygon\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport calendar\n\n\ndef fetch_dividends(sym):\n \"\"\"Given a symbol returns a dataframe of dividends\"\"\"\n symbol_dividends = polygon.dividends(sym)\n fields = ['amount', 'declaredDate', 'exDate', 'indicated', 'paymentDate', 'qualified', 'recordDate', 'symbol',\n 'type']\n # Initialize general data structure\n output = {}\n for field in fields:\n output[field] = []\n\n for dividend in symbol_dividends:\n for field in fields:\n converted = dividend._raw\n try:\n output[field].append(converted[field])\n except:\n output[field].append(np.nan)\n\n df = pd.DataFrame.from_dict(output)\n for date_field in [x for x in fields if 'Date' in x]:\n df[date_field] = pd.to_datetime(df[date_field])\n return df\n\n\ndef get_current_positions():\n \"\"\"Get current positions returns a dataframe with columns:\n Date Purchased,\n Purchase Price,\n Quantity Held,\n Ex-dividend date\n Current Price,\n Symbol\n \"\"\"\n positions = api.list_positions()\n target_columns = ['avg_entry_price', 'current_price', 'symbol', 'qty']\n\n output = {x: [] for x in target_columns}\n for elem in positions:\n for col in target_columns:\n output[col].append(elem._raw[col])\n\n return pd.DataFrame.from_dict(output)\n\n\ndef get_next_weeks_dates(weeks = 2):\n \"\"\"\n :return: a list of dates of the form [2019-Jul-22, ...] for the next week for NASDAQ parsing\n \"\"\"\n def convert(timestamp):\n conversions = {\n \"01\": \"Jan\",\n \"02\": \"Feb\",\n \"03\": \"Mar\",\n \"04\": \"Apr\",\n \"05\": \"May\",\n \"06\": \"Jun\",\n \"07\": \"Jul\",\n \"08\": \"Aug\",\n \"09\": \"Sep\",\n \"10\": \"Oct\",\n \"11\": \"Nov\",\n \"12\": \"Dec\"\n }\n beginning_time = str(timestamp).split()[0].split(\"-\")\n return f\"{beginning_time[0]}-{conversions[beginning_time[1]]}-{beginning_time[2]}\"\n\n output = []\n i = 0\n num_weekends = 0\n while True:\n target_date = datetime.now() + timedelta(i)\n\n # Ignore weekends\n if target_date.weekday() in {5,6}:\n num_weekends += 1\n if num_weekends == weeks:\n break\n else:\n i += 1 if target_date.weekday() == 6 else 2\n continue\n\n output.append(convert(target_date))\n i += 1\n return output\n\n\ndef filter_by_recovery(dividends):\n \"\"\"\n Takes a dataframe and returns those stock symbols who have historically recovered well\n after dividend distributions\n :param dividends:\n :return:\n \"\"\"\n # TODO: Write this entire method;\n return dividends\n\ndef load_dividend_targets(weeks = 2):\n # Get 1000 top dividend yield stocks\n dividend_data = pd.read_csv(\"../notebooks/dividend-stocks.csv\")\n target_symbols = set(dividend_data.Symbol)\n\n # Get one months worth of future dividends\n DIVIDEND_BASE_URL = \"https://www.nasdaq.com/dividend-stocks/dividend-calendar.aspx?date=\"\n future_dividend_dates = pd.concat([\n pd.read_html(f\"{DIVIDEND_BASE_URL + date}\")[0] for date in get_next_weeks_dates(weeks)\n ])\n future_dividend_dates = future_dividend_dates.reset_index()\n future_dividend_dates['symbol'] = future_dividend_dates[\"Company (Symbol)\"].str.extract(r\"\\s\\(([A-Z]+)\\)$\")\n\n # Filter by top 1000 dividends of upcoming distributions\n target_dividends = future_dividend_dates[future_dividend_dates['symbol'].isin(target_symbols)]\n\n # Evaluate ability of each stock to \"bounce back after each distribution\"\n # TODO: Write this evaluation\n target_dividends = filter_by_recovery(target_dividends)\n\n # Of all stocks that reasonably bounce back, sort by 
maximal percentage returns\n prices = dividend_data[['Symbol', 'LastSale']]\n\n\n #TODO Current error is that the symbols in target_dividends are not in dividend data???\n price_and_dividend_data = pd.merge(target_dividends, prices, left_on='symbol', right_on='Symbol').drop(columns=['Symbol', 'Company (Symbol)', 'Record Date', 'Announcement Date'], axis =1)\n\n # Note this factors in quarterly vs monthly dividends\n price_and_dividend_data['yield'] = price_and_dividend_data['Dividend'] / price_and_dividend_data['LastSale']\n\n return price_and_dividend_data.sort_values(by=\"yield\", ascending=False)\n\n\n\n\ndef manage_positions():\n \"\"\"\n Runs continuously and manages existing positions trying to sell\n as soon as a stock is past it's ex-dividend date and has value\n greater than or equal to it's current value\n :return:\n \"\"\"\n done = None\n while True:\n clock = api.get_clock()\n now = clock.timestamp\n if clock.is_open() and done != now.strftime(\"%Y-%m-%d\"):\n # TODO: Execute trades\n positions = get_current_positions()\n\n\n\n"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.read_html",
"pandas.DataFrame.from_dict"
]
] |
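The convert() helper above rebuilds a YYYY-Mon-DD string through a hand-written month map; under the default English locale, datetime.strftime with "%Y-%b-%d" produces the same abbreviated-month form directly, for example:

from datetime import datetime

print(datetime(2019, 7, 22).strftime("%Y-%b-%d"))  # 2019-Jul-22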
raketenolli/scipy
|
[
"ecad19c6d82fc9a7fa53a9d63bd5e2c532763aae"
] |
[
"scipy/stats/tests/test_distributions.py"
] |
[
"\"\"\" Test functions for stats module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport re\nimport sys\nimport pickle\nimport os\n\nfrom numpy.testing import (assert_equal, assert_array_equal,\n assert_almost_equal, assert_array_almost_equal,\n assert_allclose, assert_, assert_warns)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nimport numpy\nimport numpy as np\nfrom numpy import typecodes, array\nfrom numpy.lib.recfunctions import rec_append_fields\nfrom scipy import special\nfrom scipy.integrate import IntegrationWarning\nimport scipy.stats as stats\nfrom scipy.stats._distn_infrastructure import argsreduce\nimport scipy.stats.distributions\n\nfrom scipy.special import xlogy\nfrom .test_continuous_basic import distcont\n\n# python -OO strips docstrings\nDOCSTRINGS_STRIPPED = sys.flags.optimize > 1\n\n\n# Generate test cases to test cdf and distribution consistency.\n# Note that this list does not include all distributions.\ndists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',\n 'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',\n 'foldcauchy', 'gamma', 'gengamma', 'loggamma',\n 'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma', 'moyal',\n 'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',\n 'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',\n 'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',\n 'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',\n 'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',\n 'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',\n 'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang',\n 'tukeylambda', 'vonmises', 'vonmises_line', 'pearson3', 'gennorm',\n 'halfgennorm', 'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus',\n 'crystalball']\n\n\ndef _assert_hasattr(a, b, msg=None):\n if msg is None:\n msg = '%s does not have attribute %s' % (a, b)\n assert_(hasattr(a, b), msg=msg)\n\n\ndef test_api_regression():\n # https://github.com/scipy/scipy/issues/3802\n _assert_hasattr(scipy.stats.distributions, 'f_gen')\n\n\n# check function for test generator\ndef check_distribution(dist, args, alpha):\n with suppress_warnings() as sup:\n # frechet_l and frechet_r are deprecated, so all their\n # methods generate DeprecationWarnings.\n sup.filter(category=DeprecationWarning, message=\".*frechet_\")\n D, pval = stats.kstest(dist, '', args=args, N=1000)\n if (pval < alpha):\n D, pval = stats.kstest(dist, '', args=args, N=1000)\n assert_(pval > alpha,\n msg=\"D = {}; pval = {}; alpha = {}; args = {}\".format(\n D, pval, alpha, args))\n\n\ndef cases_test_all_distributions():\n np.random.seed(1234)\n\n for dist in dists:\n distfunc = getattr(stats, dist)\n nargs = distfunc.numargs\n alpha = 0.01\n if dist == 'fatiguelife':\n alpha = 0.001\n\n if dist == 'trapz':\n args = tuple(np.sort(np.random.random(nargs)))\n elif dist == 'triang':\n args = tuple(np.random.random(nargs))\n elif dist == 'reciprocal' or dist == 'truncnorm':\n vals = np.random.random(nargs)\n vals[1] = vals[0] + 1.0\n args = tuple(vals)\n elif dist == 'vonmises':\n yield dist, (10,), alpha\n yield dist, (101,), alpha\n args = tuple(1.0 + np.random.random(nargs))\n else:\n args = tuple(1.0 + np.random.random(nargs))\n\n yield dist, args, alpha\n\n\[email protected]('dist,args,alpha', cases_test_all_distributions())\ndef test_all_distributions(dist, args, alpha):\n check_distribution(dist, args, 
alpha)\n\n\ndef check_vonmises_pdf_periodic(k, l, s, x):\n vm = stats.vonmises(k, loc=l, scale=s)\n assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))\n\n\ndef check_vonmises_cdf_periodic(k, l, s, x):\n vm = stats.vonmises(k, loc=l, scale=s)\n assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)\n\n\ndef test_vonmises_pdf_periodic():\n for k in [0.1, 1, 101]:\n for x in [0, 1, numpy.pi, 10, 100]:\n check_vonmises_pdf_periodic(k, 0, 1, x)\n check_vonmises_pdf_periodic(k, 1, 1, x)\n check_vonmises_pdf_periodic(k, 0, 10, x)\n\n check_vonmises_cdf_periodic(k, 0, 1, x)\n check_vonmises_cdf_periodic(k, 1, 1, x)\n check_vonmises_cdf_periodic(k, 0, 10, x)\n\n\ndef test_vonmises_line_support():\n assert_equal(stats.vonmises_line.a, -np.pi)\n assert_equal(stats.vonmises_line.b, np.pi)\n\n\ndef test_vonmises_numerical():\n vm = stats.vonmises(800)\n assert_almost_equal(vm.cdf(0), 0.5)\n\n\[email protected]('dist',\n ['alpha', 'betaprime', 'burr', 'burr12',\n 'fatiguelife', 'invgamma', 'invgauss', 'invweibull',\n 'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',\n 'powerlognorm', 'rayleigh', 'wald'])\ndef test_support(dist):\n \"\"\"gh-6235\"\"\"\n dct = dict(distcont)\n args = dct[dist]\n\n dist = getattr(stats, dist)\n\n assert_almost_equal(dist.pdf(dist.a, *args), 0)\n assert_equal(dist.logpdf(dist.a, *args), -np.inf)\n assert_almost_equal(dist.pdf(dist.b, *args), 0)\n assert_equal(dist.logpdf(dist.b, *args), -np.inf)\n\n\nclass TestRandInt(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.randint.rvs(5, 30, size=100)\n assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))\n assert_(len(vals) == 100)\n vals = stats.randint.rvs(5, 30, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.randint.rvs(15, 46)\n assert_((val >= 15) & (val < 46))\n assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))\n val = stats.randint(15, 46).rvs(3)\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pdf(self):\n k = numpy.r_[0:36]\n out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)\n vals = stats.randint.pmf(k, 5, 30)\n assert_array_almost_equal(vals, out)\n\n def test_cdf(self):\n x = np.linspace(0,36,100)\n k = numpy.floor(x)\n out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)\n vals = stats.randint.cdf(x, 5, 30)\n assert_array_almost_equal(vals, out, decimal=12)\n\n\nclass TestBinom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.binom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.binom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.binom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for Ticket #1842\n vals1 = stats.binom.pmf(100, 100, 1)\n vals2 = stats.binom.pmf(0, 100, 0)\n assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)\n assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)\n\n def test_entropy(self):\n # Basic entropy tests.\n b = stats.binom(2, 0.5)\n expected_p = np.array([0.25, 0.5, 0.25])\n expected_h = -sum(xlogy(expected_p, expected_p))\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.binom(2, 0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.binom(2, 1.0)\n h = b.entropy()\n 
assert_equal(h, 0.0)\n\n def test_warns_p0(self):\n # no spurious warnigns are generated for p=0; gh-3817\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n assert_equal(stats.binom(n=2, p=0).mean(), 0)\n assert_equal(stats.binom(n=2, p=0).std(), 0)\n\n\nclass TestBernoulli(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.bernoulli.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.bernoulli.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.bernoulli(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_entropy(self):\n # Simple tests of entropy.\n b = stats.bernoulli(0.25)\n expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)\n h = b.entropy()\n assert_allclose(h, expected_h)\n\n b = stats.bernoulli(0.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n b = stats.bernoulli(1.0)\n h = b.entropy()\n assert_equal(h, 0.0)\n\n\nclass TestBradford(object):\n # gh-6216\n def test_cdf_ppf(self):\n c = 0.1\n x = np.logspace(-20, -4)\n q = stats.bradford.cdf(x, c)\n xx = stats.bradford.ppf(q, c)\n assert_allclose(x, xx)\n\n\nclass TestNBinom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.nbinom.rvs(10, 0.75)\n assert_(isinstance(val, int))\n val = stats.nbinom(10, 0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n # regression test for ticket 1779\n assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),\n stats.nbinom.pmf(700, 721, 0.52))\n # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)\n val = scipy.stats.nbinom.logpmf(0, 1, 1)\n assert_equal(val, 0)\n\n\nclass TestNormInvGauss(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_cdf_R(self):\n # test pdf and cdf vals against R\n # require(\"GeneralizedHyperbolic\")\n # x_test <- c(-7, -5, 0, 8, 15)\n # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)\n # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)\n r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,\n 9.988650664e-01, 9.999848769e-01])\n x_test = np.array([-7, -5, 0, 8, 15])\n vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)\n assert_allclose(vals_cdf, r_cdf, atol=1e-9)\n\n def test_pdf_R(self):\n # values from R as defined in test_cdf_R\n r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,\n 7.450485342e-04, 8.917889931e-06])\n x_test = np.array([-7, -5, 0, 8, 15])\n vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)\n assert_allclose(vals_pdf, r_pdf, atol=1e-9)\n\n def test_stats(self):\n a, b = 1, 0.5\n gamma = np.sqrt(a**2 - b**2)\n v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),\n 3.0 * (1 + 4 * b**2 / a**2) / gamma)\n assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))\n\n def test_ppf(self):\n a, b = 1, 0.5\n x_test = np.array([0.001, 0.5, 0.999])\n vals = stats.norminvgauss.ppf(x_test, a, b)\n assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))\n\n\nclass TestGeom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n 
def test_rvs(self):\n vals = stats.geom.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.geom.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.geom(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf(self):\n vals = stats.geom.pmf([1, 2, 3], 0.5)\n assert_array_almost_equal(vals, [0.5, 0.25, 0.125])\n\n def test_logpmf(self):\n # regression test for ticket 1793\n vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))\n vals2 = stats.geom.logpmf([1, 2, 3], 0.5)\n assert_allclose(vals1, vals2, rtol=1e-15, atol=0)\n\n # regression test for gh-4028\n val = stats.geom.logpmf(1, 1)\n assert_equal(val, 0.0)\n\n def test_cdf_sf(self):\n vals = stats.geom.cdf([1, 2, 3], 0.5)\n vals_sf = stats.geom.sf([1, 2, 3], 0.5)\n expected = array([0.5, 0.75, 0.875])\n assert_array_almost_equal(vals, expected)\n assert_array_almost_equal(vals_sf, 1-expected)\n\n def test_logcdf_logsf(self):\n vals = stats.geom.logcdf([1, 2, 3], 0.5)\n vals_sf = stats.geom.logsf([1, 2, 3], 0.5)\n expected = array([0.5, 0.75, 0.875])\n assert_array_almost_equal(vals, np.log(expected))\n assert_array_almost_equal(vals_sf, np.log1p(-expected))\n\n def test_ppf(self):\n vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)\n expected = array([1.0, 2.0, 3.0])\n assert_array_almost_equal(vals, expected)\n\n def test_ppf_underflow(self):\n # this should not underflow\n assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)\n\n\nclass TestPlanck(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_sf(self):\n vals = stats.planck.sf([1, 2, 3], 5.)\n expected = array([4.5399929762484854e-05,\n 3.0590232050182579e-07,\n 2.0611536224385579e-09])\n assert_array_almost_equal(vals, expected)\n\n def test_logsf(self):\n vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)\n expected = array([-1001000., -2001000., -3001000.])\n assert_array_almost_equal(vals, expected)\n\n\nclass TestGennorm(object):\n def test_laplace(self):\n # test against Laplace (special case for beta=1)\n points = [1, 2, 3]\n pdf1 = stats.gennorm.pdf(points, 1)\n pdf2 = stats.laplace.pdf(points)\n assert_almost_equal(pdf1, pdf2)\n\n def test_norm(self):\n # test against normal (special case for beta=2)\n points = [1, 2, 3]\n pdf1 = stats.gennorm.pdf(points, 2)\n pdf2 = stats.norm.pdf(points, scale=2**-.5)\n assert_almost_equal(pdf1, pdf2)\n\n\nclass TestHalfgennorm(object):\n def test_expon(self):\n # test against exponential (special case for beta=1)\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, 1)\n pdf2 = stats.expon.pdf(points)\n assert_almost_equal(pdf1, pdf2)\n\n def test_halfnorm(self):\n # test against half normal (special case for beta=2)\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, 2)\n pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)\n assert_almost_equal(pdf1, pdf2)\n\n def test_gennorm(self):\n # test against generalized normal\n points = [1, 2, 3]\n pdf1 = stats.halfgennorm.pdf(points, .497324)\n pdf2 = stats.gennorm.pdf(points, .497324)\n assert_almost_equal(pdf1, 2*pdf2)\n\n\nclass TestTruncnorm(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_ppf_ticket1131(self):\n vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def 
test_isf_ticket1131(self):\n vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,\n loc=[3]*7, scale=2)\n expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])\n assert_array_almost_equal(vals, expected)\n\n def test_gh_2477_small_values(self):\n # Check a case that worked in the original issue.\n low, high = -11, -10\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n # Check a case that failed in the original issue.\n low, high = 10, 11\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n @pytest.mark.xfail(reason=\"truncnorm rvs is know to fail at extreme tails\")\n def test_gh_2477_large_values(self):\n # Check a case that fails because of extreme tailness.\n low, high = 100, 101\n with np.errstate(divide='ignore'):\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n def test_gh_1489_trac_962_rvs(self):\n # Check the original example.\n low, high = 10, 15\n x = stats.truncnorm.rvs(low, high, 0, 1, size=10)\n assert_(low < x.min() < x.max() < high)\n\n\nclass TestHypergeom(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))\n assert_(numpy.all(vals >= 0) &\n numpy.all(vals <= 3))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.hypergeom.rvs(20, 3, 10)\n assert_(isinstance(val, int))\n val = stats.hypergeom(20, 3, 10).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_precision(self):\n # comparison number from mpmath\n M = 2500\n n = 50\n N = 500\n tot = M\n good = n\n hgpmf = stats.hypergeom.pmf(2, tot, good, N)\n assert_almost_equal(hgpmf, 0.0010114963068932233, 11)\n\n def test_args(self):\n # test correct output for corner cases of arguments\n # see gh-2325\n assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)\n assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)\n\n assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)\n assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)\n\n def test_cdf_above_one(self):\n # for some values of parameters, hypergeom cdf was >1, see gh-2238\n assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)\n\n def test_precision2(self):\n # Test hypergeom precision for large numbers. 
See #1218.\n # Results compared with those from R.\n oranges = 9.9e4\n pears = 1.1e5\n fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4\n quantile = 2e4\n res = []\n for eaten in fruits_eaten:\n res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,\n eaten))\n expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,\n 8.265601e-11, 0.1237904, 1])\n assert_allclose(res, expected, atol=0, rtol=5e-7)\n\n # Test with array_like first argument\n quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]\n res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)\n expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]\n assert_allclose(res2, expected2, atol=0, rtol=5e-7)\n\n def test_entropy(self):\n # Simple tests of entropy.\n hg = stats.hypergeom(4, 1, 1)\n h = hg.entropy()\n expected_p = np.array([0.75, 0.25])\n expected_h = -np.sum(xlogy(expected_p, expected_p))\n assert_allclose(h, expected_h)\n\n hg = stats.hypergeom(1, 1, 1)\n h = hg.entropy()\n assert_equal(h, 0.0)\n\n def test_logsf(self):\n # Test logsf for very large numbers. See issue #4982\n # Results compare with those from R (v3.2.0):\n # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)\n # -2239.771\n\n k = 1e4\n M = 1e7\n n = 1e6\n N = 5e4\n\n result = stats.hypergeom.logsf(k, M, n, N)\n exspected = -2239.771 # From R\n assert_almost_equal(result, exspected, decimal=3)\n\n\nclass TestLoggamma(object):\n\n def test_stats(self):\n # The following precomputed values are from the table in section 2.2\n # of \"A Statistical Study of Log-Gamma Distribution\", by Ping Shing\n # Chan (thesis, McMaster University, 1993).\n table = np.array([\n # c, mean, var, skew, exc. kurt.\n 0.5, -1.9635, 4.9348, -1.5351, 4.0000,\n 1.0, -0.5772, 1.6449, -1.1395, 2.4000,\n 12.0, 2.4427, 0.0869, -0.2946, 0.1735,\n ]).reshape(-1, 5)\n for c, mean, var, skew, kurt in table:\n computed = stats.loggamma.stats(c, moments='msvk')\n assert_array_almost_equal(computed, [mean, var, skew, kurt],\n decimal=4)\n\n\nclass TestLogistic(object):\n # gh-6226\n def test_cdf_ppf(self):\n x = np.linspace(-20, 20)\n y = stats.logistic.cdf(x)\n xx = stats.logistic.ppf(y)\n assert_allclose(x, xx)\n\n def test_sf_isf(self):\n x = np.linspace(-20, 20)\n y = stats.logistic.sf(x)\n xx = stats.logistic.isf(y)\n assert_allclose(x, xx)\n\n def test_extreme_values(self):\n # p is chosen so that 1 - (1 - p) == p in double precision\n p = 9.992007221626409e-16\n desired = 34.53957599234088\n assert_allclose(stats.logistic.ppf(1 - p), desired)\n assert_allclose(stats.logistic.isf(p), desired)\n\n\nclass TestLogser(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.logser.rvs(0.75, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.logser.rvs(0.75)\n assert_(isinstance(val, int))\n val = stats.logser(0.75).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_pmf_small_p(self):\n m = stats.logser.pmf(4, 1e-20)\n # The expected value was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.mp.dps = 64\n # >>> k = 4\n # >>> p = mpmath.mpf('1e-20')\n # >>> float(-(p**k)/k/mpmath.log(1-p))\n # 2.5e-61\n # It is also clear from noticing that for very small p,\n # log(1-p) is approximately -p, and the formula becomes\n # p**(k-1) / k\n assert_allclose(m, 2.5e-61)\n\n def test_mean_small_p(self):\n m = stats.logser.mean(1e-8)\n # The expected mean 
was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.dps = 60\n # >>> p = mpmath.mpf('1e-8')\n # >>> float(-p / ((1 - p)*mpmath.log(1 - p)))\n # 1.000000005\n assert_allclose(m, 1.000000005)\n\n\nclass TestPareto(object):\n def test_stats(self):\n # Check the stats() method with some simple values. Also check\n # that the calculations do not trigger RuntimeWarnings.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')\n assert_equal(m, np.inf)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')\n assert_equal(m, 3.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')\n assert_equal(m, 2.0)\n assert_equal(v, np.inf)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')\n assert_allclose(m, 2.5 / 1.5)\n assert_allclose(v, 2.5 / (1.5*1.5*0.5))\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')\n assert_allclose(m, 1.5)\n assert_allclose(v, 0.75)\n assert_equal(s, np.nan)\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')\n assert_allclose(m, 3.5 / 2.5)\n assert_allclose(v, 3.5 / (2.5*2.5*1.5))\n assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')\n assert_allclose(m, 4.0 / 3.0)\n assert_allclose(v, 4.0 / 18.0)\n assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))\n assert_equal(k, np.nan)\n\n m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')\n assert_allclose(m, 4.5 / 3.5)\n assert_allclose(v, 4.5 / (3.5*3.5*2.5))\n assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))\n assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))\n\n def test_sf(self):\n x = 1e9\n b = 2\n scale = 1.5\n p = stats.pareto.sf(x, b, loc=0, scale=scale)\n expected = (scale/x)**b # 2.25e-18\n assert_allclose(p, expected)\n\n\nclass TestGenpareto(object):\n def test_ab(self):\n # c >= 0: a, b = [0, inf]\n for c in [1., 0.]:\n c = np.asarray(c)\n stats.genpareto._argcheck(c) # ugh\n assert_equal(stats.genpareto.a, 0.)\n assert_(np.isposinf(stats.genpareto.b))\n\n # c < 0: a=0, b=1/|c|\n c = np.asarray(-2.)\n stats.genpareto._argcheck(c)\n assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])\n\n def test_c0(self):\n # with c=0, genpareto reduces to the exponential distribution\n rv = stats.genpareto(c=0.)\n x = np.linspace(0, 10., 30)\n assert_allclose(rv.pdf(x), stats.expon.pdf(x))\n assert_allclose(rv.cdf(x), stats.expon.cdf(x))\n assert_allclose(rv.sf(x), stats.expon.sf(x))\n\n q = np.linspace(0., 1., 10)\n assert_allclose(rv.ppf(q), stats.expon.ppf(q))\n\n def test_cm1(self):\n # with c=-1, genpareto reduces to the uniform distr on [0, 1]\n rv = stats.genpareto(c=-1.)\n x = np.linspace(0, 10., 30)\n assert_allclose(rv.pdf(x), stats.uniform.pdf(x))\n assert_allclose(rv.cdf(x), stats.uniform.cdf(x))\n assert_allclose(rv.sf(x), stats.uniform.sf(x))\n\n q = np.linspace(0., 1., 10)\n assert_allclose(rv.ppf(q), stats.uniform.ppf(q))\n\n # logpdf(1., c=-1) should be zero\n assert_allclose(rv.logpdf(1), 0)\n\n def test_x_inf(self):\n # make sure x=inf is 
handled gracefully\n        rv = stats.genpareto(c=0.1)\n        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n        assert_(np.isneginf(rv.logpdf(np.inf)))\n\n        rv = stats.genpareto(c=0.)\n        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n        assert_(np.isneginf(rv.logpdf(np.inf)))\n\n        rv = stats.genpareto(c=-1.)\n        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])\n        assert_(np.isneginf(rv.logpdf(np.inf)))\n\n    def test_c_continuity(self):\n        # pdf is continuous at c=0, -1\n        x = np.linspace(0, 10, 30)\n        for c in [0, -1]:\n            pdf0 = stats.genpareto.pdf(x, c)\n            for dc in [1e-14, -1e-14]:\n                pdfc = stats.genpareto.pdf(x, c + dc)\n                assert_allclose(pdf0, pdfc, atol=1e-12)\n\n            cdf0 = stats.genpareto.cdf(x, c)\n            for dc in [1e-14, -1e-14]:\n                cdfc = stats.genpareto.cdf(x, c + dc)\n                assert_allclose(cdf0, cdfc, atol=1e-12)\n\n    def test_c_continuity_ppf(self):\n        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n                  np.linspace(0.01, 1, 30, endpoint=False),\n                  1. - np.logspace(1e-12, 0.01, base=0.1)]\n        for c in [0., -1.]:\n            ppf0 = stats.genpareto.ppf(q, c)\n            for dc in [1e-14, -1e-14]:\n                ppfc = stats.genpareto.ppf(q, c + dc)\n                assert_allclose(ppf0, ppfc, atol=1e-12)\n\n    def test_c_continuity_isf(self):\n        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n                  np.linspace(0.01, 1, 30, endpoint=False),\n                  1. - np.logspace(1e-12, 0.01, base=0.1)]\n        for c in [0., -1.]:\n            isf0 = stats.genpareto.isf(q, c)\n            for dc in [1e-14, -1e-14]:\n                isfc = stats.genpareto.isf(q, c + dc)\n                assert_allclose(isf0, isfc, atol=1e-12)\n\n    def test_cdf_ppf_roundtrip(self):\n        # this should pass with machine precision. hat tip @pbrod\n        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),\n                  np.linspace(0.01, 1, 30, endpoint=False),\n                  1. - np.logspace(1e-12, 0.01, base=0.1)]\n        for c in [1e-8, -1e-18, 1e-15, -1e-15]:\n            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),\n                            q, atol=1e-15)\n\n    def test_logsf(self):\n        logp = stats.genpareto.logsf(1e10, .01, 0, 1)\n        assert_allclose(logp, -1842.0680753952365)\n\n\nclass TestPearson3(object):\n    def setup_method(self):\n        np.random.seed(1234)\n\n    def test_rvs(self):\n        vals = stats.pearson3.rvs(0.1, size=(2, 50))\n        assert_(numpy.shape(vals) == (2, 50))\n        assert_(vals.dtype.char in typecodes['AllFloat'])\n        val = stats.pearson3.rvs(0.5)\n        assert_(isinstance(val, float))\n        val = stats.pearson3(0.5).rvs(3)\n        assert_(isinstance(val, numpy.ndarray))\n        assert_(val.dtype.char in typecodes['AllFloat'])\n        assert_(len(val) == 3)\n\n    def test_pdf(self):\n        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])\n        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),\n                        atol=1e-6)\n        vals = stats.pearson3.pdf(-3, 0.1)\n        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)\n        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)\n        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,\n                                        0.39885918, 0.23413173]), atol=1e-6)\n\n    def test_cdf(self):\n        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])\n        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),\n                        atol=1e-6)\n        vals = stats.pearson3.cdf(-3, 0.1)\n        assert_allclose(vals, [0.00082256], atol=1e-6)\n        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)\n        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,\n                               5.06649130e-01, 8.41442111e-01], atol=1e-6)\n\n\nclass TestKappa4(object):\n    def test_cdf_genpareto(self):\n        # h = 1 and k != 0 is generalized Pareto\n        x = [0.0, 0.1, 0.2, 0.5]\n        h = 1.0\n        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,\n                  1.9]:\n            vals = stats.kappa4.cdf(x, h, k)\n            # shape parameter 
is opposite what is expected\n vals_comp = stats.genpareto.cdf(x, -k)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_genextreme(self):\n # h = 0 and k != 0 is generalized extreme value\n x = np.linspace(-5, 5, 10)\n h = 0.0\n k = np.linspace(-3, 3, 10)\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.genextreme.cdf(x, k)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_expon(self):\n # h = 1 and k = 0 is exponential\n x = np.linspace(0, 10, 10)\n h = 1.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.expon.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_gumbel_r(self):\n # h = 0 and k = 0 is gumbel_r\n x = np.linspace(-5, 5, 10)\n h = 0.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.gumbel_r.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_logistic(self):\n # h = -1 and k = 0 is logistic\n x = np.linspace(-5, 5, 10)\n h = -1.0\n k = 0.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.logistic.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_cdf_uniform(self):\n # h = 1 and k = 1 is uniform\n x = np.linspace(-5, 5, 10)\n h = 1.0\n k = 1.0\n vals = stats.kappa4.cdf(x, h, k)\n vals_comp = stats.uniform.cdf(x)\n assert_allclose(vals, vals_comp)\n\n def test_integers_ctor(self):\n # regression test for gh-7416: _argcheck fails for integer h and k\n # in numpy 1.12\n stats.kappa4(1, 2)\n\n\nclass TestPoisson(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_pmf_basic(self):\n # Basic case\n ln2 = np.log(2)\n vals = stats.poisson.pmf([0, 1, 2], ln2)\n expected = [0.5, ln2/2, ln2**2/4]\n assert_allclose(vals, expected)\n\n def test_mu0(self):\n # Edge case: mu=0\n vals = stats.poisson.pmf([0, 1, 2], 0)\n expected = [1, 0, 0]\n assert_array_equal(vals, expected)\n\n interval = stats.poisson.interval(0.95, 0)\n assert_equal(interval, (0, 0))\n\n def test_rvs(self):\n vals = stats.poisson.rvs(0.5, size=(2, 50))\n assert_(numpy.all(vals >= 0))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.poisson.rvs(0.5)\n assert_(isinstance(val, int))\n val = stats.poisson(0.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_stats(self):\n mu = 16.0\n result = stats.poisson.stats(mu, moments='mvsk')\n assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])\n\n mu = np.array([0.0, 1.0, 2.0])\n result = stats.poisson.stats(mu, moments='mvsk')\n expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])\n assert_allclose(result, expected)\n\n\nclass TestZipf(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.zipf.rvs(1.5, size=(2, 50))\n assert_(numpy.all(vals >= 1))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = stats.zipf.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.zipf(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n\n def test_moments(self):\n # n-th moment is finite iff a > n + 1\n m, v = stats.zipf.stats(a=2.8)\n assert_(np.isfinite(m))\n assert_equal(v, np.inf)\n\n s, k = stats.zipf.stats(a=4.8, moments='sk')\n assert_(not np.isfinite([s, k]).all())\n\n\nclass TestDLaplace(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n vals = stats.dlaplace.rvs(1.5, size=(2, 50))\n assert_(numpy.shape(vals) == (2, 50))\n assert_(vals.dtype.char in typecodes['AllInteger'])\n val = 
stats.dlaplace.rvs(1.5)\n assert_(isinstance(val, int))\n val = stats.dlaplace(1.5).rvs(3)\n assert_(isinstance(val, numpy.ndarray))\n assert_(val.dtype.char in typecodes['AllInteger'])\n assert_(stats.dlaplace.rvs(0.8) is not None)\n\n def test_stats(self):\n # compare the explicit formulas w/ direct summation using pmf\n a = 1.\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n\n N = 37\n xx = np.arange(-N, N+1)\n pp = dl.pmf(xx)\n m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)\n assert_equal((m, s), (0, 0))\n assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)\n\n def test_stats2(self):\n a = np.log(2.)\n dl = stats.dlaplace(a)\n m, v, s, k = dl.stats('mvsk')\n assert_equal((m, s), (0., 0.))\n assert_allclose((v, k), (4., 3.25))\n\n\nclass TestInvGamma(object):\n def test_invgamma_inf_gh_1866(self):\n # invgamma's moments are only finite for a>n\n # specific numbers checked w/ boost 1.54\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')\n expected = [0.05461496450, 0.0001723162534, 1.020362676,\n 2.055616582]\n assert_allclose(mvsk, expected)\n\n a = [1.1, 3.1, 5.6]\n mvsk = stats.invgamma.stats(a=a, moments='mvsk')\n expected = ([10., 0.476190476, 0.2173913043], # mmm\n [np.inf, 0.2061430632, 0.01312749422], # vvv\n [np.nan, 41.95235392, 2.919025532], # sss\n [np.nan, np.nan, 24.51923076]) # kkk\n for x, y in zip(mvsk, expected):\n assert_almost_equal(x, y)\n\n def test_cdf_ppf(self):\n # gh-6245\n x = np.logspace(-2.6, 0)\n y = stats.invgamma.cdf(x, 1)\n xx = stats.invgamma.ppf(y, 1)\n assert_allclose(x, xx)\n\n def test_sf_isf(self):\n # gh-6245\n if sys.maxsize > 2**32:\n x = np.logspace(2, 100)\n else:\n # Invgamme roundtrip on 32-bit systems has relative accuracy\n # ~1e-15 until x=1e+15, and becomes inf above x=1e+18\n x = np.logspace(2, 18)\n\n y = stats.invgamma.sf(x, 1)\n xx = stats.invgamma.isf(y, 1)\n assert_allclose(x, xx, rtol=1.0)\n\n\nclass TestF(object):\n def test_f_moments(self):\n # n-th moment of F distributions is only finite for n < dfd / 2\n m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')\n assert_(np.isfinite(m))\n assert_(np.isfinite(v))\n assert_(np.isfinite(s))\n assert_(not np.isfinite(k))\n\n def test_moments_warnings(self):\n # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')\n\n @pytest.mark.xfail(reason='f stats does not properly broadcast')\n def test_stats_broadcast(self):\n # stats do not fully broadcast just yet\n mv = stats.f.stats(dfn=11, dfd=[11, 12])\n\n\ndef test_rvgeneric_std():\n # Regression test for #1191\n assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])\n\n\ndef test_moments_t():\n # regression test for #8786\n assert_equal(stats.t.stats(df=1, moments='mvsk'),\n (np.inf, np.nan, np.nan, np.nan))\n assert_equal(stats.t.stats(df=1.01, moments='mvsk'),\n (0.0, np.inf, np.nan, np.nan))\n assert_equal(stats.t.stats(df=2, moments='mvsk'),\n (0.0, np.inf, np.nan, np.nan))\n assert_equal(stats.t.stats(df=2.01, moments='mvsk'),\n (0.0, 2.01/(2.01-2.0), np.nan, np.inf))\n assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))\n assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))\n assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))\n assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))\n\n\nclass 
TestRvDiscrete(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_rvs(self):\n states = [-1, 0, 1, 2, 3, 4]\n probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]\n samples = 1000\n r = stats.rv_discrete(name='sample', values=(states, probability))\n x = r.rvs(size=samples)\n assert_(isinstance(x, numpy.ndarray))\n\n for s, p in zip(states, probability):\n assert_(abs(sum(x == s)/float(samples) - p) < 0.05)\n\n x = r.rvs()\n assert_(isinstance(x, int))\n\n def test_entropy(self):\n # Basic tests of entropy.\n pvals = np.array([0.25, 0.45, 0.3])\n p = stats.rv_discrete(values=([0, 1, 2], pvals))\n expected_h = -sum(xlogy(pvals, pvals))\n h = p.entropy()\n assert_allclose(h, expected_h)\n\n p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))\n h = p.entropy()\n assert_equal(h, 0.0)\n\n def test_pmf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n x = [[1., 4.],\n [3., 2]]\n assert_allclose(rv.pmf(x),\n [[0.5, 0.2],\n [0., 0.3]], atol=1e-14)\n\n def test_cdf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]\n expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]\n assert_allclose(rv.cdf(x_values), expected, atol=1e-14)\n\n # also check scalar arguments\n assert_allclose([rv.cdf(xx) for xx in x_values],\n expected, atol=1e-14)\n\n def test_ppf(self):\n xk = [1, 2, 4]\n pk = [0.5, 0.3, 0.2]\n rv = stats.rv_discrete(values=(xk, pk))\n\n q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]\n expected = [1, 1, 2, 2, 4, 4]\n assert_allclose(rv.ppf(q_values), expected, atol=1e-14)\n\n # also check scalar arguments\n assert_allclose([rv.ppf(q) for q in q_values],\n expected, atol=1e-14)\n\n def test_cdf_ppf_next(self):\n # copied and special cased from test_discrete_basic\n vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])\n rv = stats.rv_discrete(values=vals)\n\n assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),\n rv.xk[1:])\n\n def test_expect(self):\n xk = [1, 2, 4, 6, 7, 11]\n pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]\n rv = stats.rv_discrete(values=(xk, pk))\n\n assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)\n\n def test_bad_input(self):\n xk = [1, 2, 3]\n pk = [0.5, 0.5]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n pk = [1, 2, 3]\n assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))\n\n\nclass TestSkewNorm(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_normal(self):\n # When the skewness is 0 the distribution is normal\n x = np.linspace(-5, 5, 100)\n assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),\n stats.norm.pdf(x))\n\n def test_rvs(self):\n shape = (3, 4, 5)\n x = stats.skewnorm.rvs(a=0.75, size=shape)\n assert_equal(shape, x.shape)\n\n x = stats.skewnorm.rvs(a=-3, size=shape)\n assert_equal(shape, x.shape)\n\n def test_moments(self):\n X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)\n expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]\n computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')\n assert_array_almost_equal(computed, expected, decimal=2)\n\n X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)\n expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]\n computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')\n assert_array_almost_equal(computed, expected, decimal=2)\n\n def test_cdf_large_x(self):\n # Regression test for gh-7746.\n # The x values are large enough that the closest 64 
bit floating\n # point representation of the exact CDF is 1.0.\n p = stats.skewnorm.cdf([10, 20, 30], -1)\n assert_allclose(p, np.ones(3), rtol=1e-14)\n p = stats.skewnorm.cdf(25, 2.5)\n assert_allclose(p, 1.0, rtol=1e-14)\n\n def test_cdf_sf_small_values(self):\n # Triples are [x, a, cdf(x, a)]. These values were computed\n # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.\n cdfvals = [\n [-8, 1, 3.870035046664392611e-31],\n [-4, 2, 8.1298399188811398e-21],\n [-2, 5, 1.55326826787106273e-26],\n [-9, -1, 2.257176811907681295e-19],\n [-10, -4, 1.523970604832105213e-23],\n ]\n for x, a, cdfval in cdfvals:\n p = stats.skewnorm.cdf(x, a)\n assert_allclose(p, cdfval, rtol=1e-8)\n # For the skew normal distribution, sf(-x, -a) = cdf(x, a).\n p = stats.skewnorm.sf(-x, -a)\n assert_allclose(p, cdfval, rtol=1e-8)\n\n\nclass TestExpon(object):\n def test_zero(self):\n assert_equal(stats.expon.pdf(0), 1)\n\n def test_tail(self): # Regression test for ticket 807\n assert_equal(stats.expon.cdf(1e-18), 1e-18)\n assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)\n\n\nclass TestExponNorm(object):\n def test_moments(self):\n # Some moment test cases based on non-loc/scaled formula\n def get_moms(lam, sig, mu):\n # See wikipedia for these formulae\n # where it is listed as an exponentially modified gaussian\n opK2 = 1.0 + 1 / (lam*sig)**2\n exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)\n exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)\n return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]\n\n mu, sig, lam = 0, 1, 1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = -3, 2, 0.1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = 0, 3, 1\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n mu, sig, lam = -5, 11, 3.5\n K = 1.0 / (lam * sig)\n sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')\n assert_almost_equal(sts, get_moms(lam, sig, mu))\n\n def test_extremes_x(self):\n # Test for extreme values against overflows\n assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)\n assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)\n\n\nclass TestGenExpon(object):\n def test_pdf_unity_area(self):\n from scipy.integrate import simps\n # PDF should integrate to one\n p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)\n assert_almost_equal(simps(p, dx=0.01), 1, 1)\n\n def test_cdf_bounds(self):\n # CDF should always be positive\n cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)\n assert_(numpy.all((0 <= cdf) & (cdf <= 1)))\n\n\nclass TestExponpow(object):\n def test_tail(self):\n assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)\n assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),\n 5)\n\n\nclass TestSkellam(object):\n def test_pmf(self):\n # comparison to R\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skpmfR = numpy.array(\n [4.2254582961926893e-005, 1.1404838449648488e-004,\n 2.8979625801752660e-004, 6.9177078182101231e-004,\n 1.5480716105844708e-003, 3.2412274963433889e-003,\n 6.3373707175123292e-003, 1.1552351566696643e-002,\n 1.9606152375042644e-002, 3.0947164083410337e-002,\n 4.5401737566767360e-002, 6.1894328166820688e-002,\n 7.8424609500170578e-002, 9.2418812533573133e-002,\n 1.0139793148019728e-001, 
1.0371927988298846e-001,\n 9.9076583077406091e-002, 8.8546660073089561e-002,\n 7.4187842052486810e-002, 5.8392772862200251e-002,\n 4.3268692953013159e-002, 3.0248159818374226e-002,\n 1.9991434305603021e-002, 1.2516877303301180e-002,\n 7.4389876226229707e-003])\n\n assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)\n\n def test_cdf(self):\n # comparison to R, only 5 decimals\n k = numpy.arange(-10, 15)\n mu1, mu2 = 10, 5\n skcdfR = numpy.array(\n [6.4061475386192104e-005, 1.7810985988267694e-004,\n 4.6790611790020336e-004, 1.1596768997212152e-003,\n 2.7077485103056847e-003, 5.9489760066490718e-003,\n 1.2286346724161398e-002, 2.3838698290858034e-002,\n 4.3444850665900668e-002, 7.4392014749310995e-002,\n 1.1979375231607835e-001, 1.8168808048289900e-001,\n 2.6011268998306952e-001, 3.5253150251664261e-001,\n 4.5392943399683988e-001, 5.5764871387982828e-001,\n 6.5672529695723436e-001, 7.4527195703032389e-001,\n 8.1945979908281064e-001, 8.7785257194501087e-001,\n 9.2112126489802404e-001, 9.5136942471639818e-001,\n 9.7136085902200120e-001, 9.8387773632530240e-001,\n 9.9131672394792536e-001])\n\n assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)\n\n\nclass TestLognorm(object):\n def test_pdf(self):\n # Regression test for Ticket #1471: avoid nan with 0/0 situation\n # Also make sure there are no warnings at x=0, cf gh-5202\n with warnings.catch_warnings():\n warnings.simplefilter('error', RuntimeWarning)\n pdf = stats.lognorm.pdf([0, 0.5, 1], 1)\n assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])\n\n def test_logcdf(self):\n # Regression test for gh-5940: sf et al would underflow too early\n x2, mu, sigma = 201.68, 195, 0.149\n assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),\n stats.norm.sf(np.log(x2-mu)/sigma))\n assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),\n stats.norm.logsf(np.log(x2-mu)/sigma))\n\n\nclass TestBeta(object):\n def test_logpdf(self):\n # Regression test for Ticket #1326: avoid nan with 0*log(0) situation\n logpdf = stats.beta.logpdf(0, 1, 0.5)\n assert_almost_equal(logpdf, -0.69314718056)\n logpdf = stats.beta.logpdf(0, 0.5, 1)\n assert_almost_equal(logpdf, np.inf)\n\n def test_logpdf_ticket_1866(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.beta(alpha, beta)\n assert_allclose(b.logpdf(x).sum(), -1201.699061824062)\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n\nclass TestBetaPrime(object):\n def test_logpdf(self):\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n b = stats.betaprime(alpha, beta)\n assert_(np.isfinite(b.logpdf(x)).all())\n assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))\n\n def test_cdf(self):\n # regression test for gh-4030: Implementation of\n # scipy.stats.betaprime.cdf()\n x = stats.betaprime.cdf(0, 0.2, 0.3)\n assert_equal(x, 0.0)\n\n alpha, beta = 267, 1472\n x = np.array([0.2, 0.5, 0.6])\n cdfs = stats.betaprime.cdf(x, alpha, beta)\n assert_(np.isfinite(cdfs).all())\n\n # check the new cdf implementation vs generic one:\n gen_cdf = stats.rv_continuous._cdf_single\n cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]\n assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)\n\n\nclass TestGamma(object):\n def test_pdf(self):\n # a few test cases to compare with R\n pdf = stats.gamma.pdf(90, 394, scale=1./5)\n assert_almost_equal(pdf, 0.002312341)\n\n pdf = stats.gamma.pdf(3, 10, scale=1./5)\n assert_almost_equal(pdf, 0.1620358)\n\n def test_logpdf(self):\n # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)\n # situation\n 
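# With shape a=1 the gamma pdf at x=0 reduces to exp(-0)/gamma(1) = 1 (the (a-1)*log(x) term is 0*log(0)), so logpdf(0, 1) should be exactly 0 rather than nan.\n        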
logpdf = stats.gamma.logpdf(0, 1)\n assert_almost_equal(logpdf, 0)\n\n\nclass TestChi2(object):\n # regression tests after precision improvements, ticket:1041, not verified\n def test_precision(self):\n assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,\n decimal=14)\n assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,\n decimal=14)\n\n def test_ppf(self):\n # Expected values computed with mpmath.\n df = 4.8\n x = stats.chi2.ppf(2e-47, df)\n assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)\n x = stats.chi2.ppf(0.5, df)\n assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)\n\n df = 13\n x = stats.chi2.ppf(2e-77, df)\n assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)\n x = stats.chi2.ppf(0.1, df)\n assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)\n\n\nclass TestGumbelL(object):\n # gh-6228\n def test_cdf_ppf(self):\n x = np.linspace(-100, -4)\n y = stats.gumbel_l.cdf(x)\n xx = stats.gumbel_l.ppf(y)\n assert_allclose(x, xx)\n\n def test_logcdf_logsf(self):\n x = np.linspace(-100, -4)\n y = stats.gumbel_l.logcdf(x)\n z = stats.gumbel_l.logsf(x)\n u = np.exp(y)\n v = -special.expm1(z)\n assert_allclose(u, v)\n\n def test_sf_isf(self):\n x = np.linspace(-20, 5)\n y = stats.gumbel_l.sf(x)\n xx = stats.gumbel_l.isf(y)\n assert_allclose(x, xx)\n\nclass TestLevyStable(object):\n\n def test_fit(self):\n # construct data to have percentiles that match\n # example in McCulloch 1986.\n x = [-.05413,-.05413,\n 0.,0.,0.,0.,\n .00533,.00533,.00533,.00533,.00533,\n .03354,.03354,.03354,.03354,.03354,\n .05309,.05309,.05309,.05309,.05309]\n alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)\n assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)\n assert_almost_equal(beta1, -.22, 2)\n assert_almost_equal(scale1, 0.01717, 4)\n assert_almost_equal(loc1, 0.00233, 2) # to 2 dps due to rounding error in McCulloch86\n\n # cover alpha=2 scenario\n x2 = x + [.05309,.05309,.05309,.05309,.05309]\n alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)\n assert_equal(alpha2, 2)\n assert_equal(beta2, -1)\n assert_almost_equal(scale2, .02503, 4)\n assert_almost_equal(loc2, .03354, 4)\n\n @pytest.mark.slow\n def test_pdf_nolan_samples(self):\n \"\"\" Test pdf values against Nolan's stablec.exe output\n see - http://fs2.american.edu/jpnolan/www/stable/stable.html\n\n There's a known limitation of Nolan's executable for alpha < 0.2.\n\n Repeat following with beta = -1, -.5, 0, .5 and 1\n stablec.exe <<\n 1 # pdf\n 1 # Nolan S equivalent to S0 in scipy\n .25,2,.25 # alpha\n -1,-1,0 # beta\n -10,10,1 # x\n 1,0 # gamma, delta\n 2 # output file\n \"\"\"\n data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'data/stable-pdf-sample-data.npy')))\n\n data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')\n\n # support numpy 1.8.2 for travis\n npisin = np.isin if hasattr(np, \"isin\") else np.in1d\n\n tests = [\n # best selects\n ['best', None, 8, None],\n\n # quadrature is accurate for most alpha except 0.25; perhaps limitation of Nolan stablec?\n # we reduce size of x to speed up computation as numerical integration slow.\n ['quadrature', None, 8, lambda r: (r['alpha'] > 0.25) & (npisin(r['x'], [-10,-5,0,5,10]))],\n\n # zolatarev is accurate except at alpha==1, beta != 0\n ['zolotarev', None, 8, lambda r: r['alpha'] != 1],\n ['zolotarev', None, 8, lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],\n ['zolotarev', None, 1, lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],\n\n # fft accuracy reduces as 
alpha decreases, fails at low values of alpha and x=0\n ['fft', 0, 4, lambda r: r['alpha'] > 1],\n ['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],\n ['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)], # not useful here\n ]\n for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n subdata = data[filter_func(data)] if filter_func is not None else data\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning, \"Density calculation unstable for alpha=1 and beta!=0.*\")\n sup.record(RuntimeWarning, \"Density calculations experimental for FFT method.*\")\n p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)\n subdata2 = rec_append_fields(subdata, 'calc', p)\n failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]\n assert_almost_equal(p, subdata['p'], decimal_places, \"pdf test %s failed with method '%s'\\n%s\" % (ix, default_method, failures), verbose=False)\n\n @pytest.mark.slow\n def test_cdf_nolan_samples(self):\n \"\"\" Test cdf values against Nolan's stablec.exe output\n see - http://fs2.american.edu/jpnolan/www/stable/stable.html\n\n There's a known limitation of Nolan's executable for alpha < 0.2.\n\n Repeat following with beta = -1, -.5, 0, .5 and 1\n stablec.exe <<\n 2 # cdf\n 1 # Nolan S equivalent to S0 in scipy\n .25,2,.25 # alpha\n -1,-1,0 # beta\n -10,10,1 # x\n 1,0 # gamma, delta\n 2 # output file\n \"\"\"\n data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'data/stable-cdf-sample-data.npy')))\n\n data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')\n\n tests = [\n # zolatarev is accurate for all values\n ['zolotarev', None, 8, None],\n\n # fft accuracy poor, very poor alpha < 1\n ['fft', 0, 2, lambda r: r['alpha'] > 1],\n ]\n for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n subdata = data[filter_func(data)] if filter_func is not None else data\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning, \"Cumulative density calculations experimental for FFT method.*\")\n p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)\n subdata2 = rec_append_fields(subdata, 'calc', p)\n failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]\n assert_almost_equal(p, subdata['p'], decimal_places, \"cdf test %s failed with method '%s'\\n%s\" % (ix, default_method, failures), verbose=False)\n\n def test_pdf_alpha_equals_one_beta_non_zero(self):\n \"\"\" sample points extracted from Tables and Graphs of Stable Probability\n Density Functions - Donald R Holt - 1973 - p 187.\n \"\"\"\n xs = np.array([0, 0, 0, 0,\n 1, 1, 1, 1,\n 2, 2, 2, 2,\n 3, 3, 3, 3,\n 4, 4, 4, 4])\n density = np.array([.3183, .3096, .2925, .2622,\n .1591, .1587, .1599, .1635,\n .0637, .0729, .0812, .0955,\n .0318, .0390, .0458, .0586,\n .0187, .0236, .0285, .0384])\n betas = np.array([0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1,\n 0, .25, .5, 1])\n\n tests = [\n ['quadrature', None, 4],\n #['fft', 0, 4],\n ['zolotarev', None, 1],\n ]\n\n with np.errstate(all='ignore'), suppress_warnings() as sup:\n sup.filter(category=RuntimeWarning, message=\"Density calculation 
unstable.*\")\n for default_method, fft_min_points, decimal_places in tests:\n stats.levy_stable.pdf_default_method = default_method\n stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points\n #stats.levy_stable.fft_grid_spacing = 0.0001\n pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)\n assert_almost_equal(pdf, density, decimal_places, default_method)\n\n def test_stats(self):\n param_sets = [\n [(1.48,-.22, 0, 1), (0,np.inf,np.NaN,np.NaN)],\n [(2,.9, 10, 1.5), (10,4.5,0,0)]\n ]\n for args, exp_stats in param_sets:\n calc_stats = stats.levy_stable.stats(args[0], args[1], loc=args[2], scale=args[3], moments='mvsk')\n assert_almost_equal(calc_stats, exp_stats)\n\nclass TestArrayArgument(object): # test for ticket:992\n def setup_method(self):\n np.random.seed(1234)\n\n def test_noexception(self):\n rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),\n size=(10, 5))\n assert_equal(rvs.shape, (10, 5))\n\n\nclass TestDocstring(object):\n def test_docstrings(self):\n # See ticket #761\n if stats.rayleigh.__doc__ is not None:\n assert_(\"rayleigh\" in stats.rayleigh.__doc__.lower())\n if stats.bernoulli.__doc__ is not None:\n assert_(\"bernoulli\" in stats.bernoulli.__doc__.lower())\n\n def test_no_name_arg(self):\n # If name is not given, construction shouldn't fail. See #1508.\n stats.rv_continuous()\n stats.rv_discrete()\n\n\nclass TestEntropy(object):\n def test_entropy_positive(self):\n # See ticket #497\n pk = [0.5, 0.2, 0.3]\n qk = [0.1, 0.25, 0.65]\n eself = stats.entropy(pk, pk)\n edouble = stats.entropy(pk, qk)\n assert_(0.0 == eself)\n assert_(edouble >= 0.0)\n\n def test_entropy_base(self):\n pk = np.ones(16, float)\n S = stats.entropy(pk, base=2.)\n assert_(abs(S - 4.) < 1.e-5)\n\n qk = np.ones(16, float)\n qk[:8] = 2.\n S = stats.entropy(pk, qk)\n S2 = stats.entropy(pk, qk, base=2.)\n assert_(abs(S/S2 - np.log(2.)) < 1.e-5)\n\n def test_entropy_zero(self):\n # Test for PR-479\n assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,\n decimal=12)\n\n def test_entropy_2d(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk),\n [0.1933259, 0.18609809])\n\n def test_entropy_2d_zero(self):\n pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]\n qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]\n assert_array_almost_equal(stats.entropy(pk, qk),\n [np.inf, 0.18609809])\n\n pk[0][0] = 0.0\n assert_array_almost_equal(stats.entropy(pk, qk),\n [0.17403988, 0.18609809])\n\n\ndef TestArgsreduce():\n a = array([1, 3, 2, 1, 2, 3, 3])\n b, c = argsreduce(a > 1, a, 2)\n\n assert_array_equal(b, [3, 2, 2, 3, 3])\n assert_array_equal(c, [2, 2, 2, 2, 2])\n\n b, c = argsreduce(2 > 1, a, 2)\n assert_array_equal(b, a[0])\n assert_array_equal(c, [2])\n\n b, c = argsreduce(a > 0, a, 2)\n assert_array_equal(b, a)\n assert_array_equal(c, [2] * numpy.size(a))\n\n\nclass TestFitMethod(object):\n skip = ['ncf']\n\n def setup_method(self):\n np.random.seed(1234)\n\n @pytest.mark.slow\n @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())\n def test_fit(self, dist, args, alpha):\n if dist in self.skip:\n pytest.skip(\"%s fit known to fail\" % dist)\n distfunc = getattr(stats, dist)\n with np.errstate(all='ignore'), suppress_warnings() as sup:\n sup.filter(category=DeprecationWarning, message=\".*frechet_\")\n res = distfunc.rvs(*args, **{'size': 200})\n vals = distfunc.fit(res)\n vals2 = distfunc.fit(res, optimizer='powell')\n # Only check the length of the return\n # FIXME: should 
check the actual results to see if we are 'close'\n # to what was created --- but what is 'close' enough\n assert_(len(vals) == 2+len(args))\n assert_(len(vals2) == 2+len(args))\n\n @pytest.mark.slow\n @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())\n def test_fix_fit(self, dist, args, alpha):\n # Not sure why 'ncf', and 'beta' are failing\n # frechet has different len(args) than distfunc.numargs\n if dist in self.skip + ['frechet']:\n pytest.skip(\"%s fit known to fail\" % dist)\n distfunc = getattr(stats, dist)\n with np.errstate(all='ignore'), suppress_warnings() as sup:\n sup.filter(category=DeprecationWarning, message=\".*frechet_\")\n res = distfunc.rvs(*args, **{'size': 200})\n vals = distfunc.fit(res, floc=0)\n vals2 = distfunc.fit(res, fscale=1)\n assert_(len(vals) == 2+len(args))\n assert_(vals[-2] == 0)\n assert_(vals2[-1] == 1)\n assert_(len(vals2) == 2+len(args))\n if len(args) > 0:\n vals3 = distfunc.fit(res, f0=args[0])\n assert_(len(vals3) == 2+len(args))\n assert_(vals3[0] == args[0])\n if len(args) > 1:\n vals4 = distfunc.fit(res, f1=args[1])\n assert_(len(vals4) == 2+len(args))\n assert_(vals4[1] == args[1])\n if len(args) > 2:\n vals5 = distfunc.fit(res, f2=args[2])\n assert_(len(vals5) == 2+len(args))\n assert_(vals5[2] == args[2])\n\n def test_fix_fit_2args_lognorm(self):\n # Regression test for #1551.\n np.random.seed(12345)\n with np.errstate(all='ignore'):\n x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)\n expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())\n assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),\n [expected_shape, 0, 20], atol=1e-8)\n\n def test_fix_fit_norm(self):\n x = np.arange(1, 6)\n\n loc, scale = stats.norm.fit(x)\n assert_almost_equal(loc, 3)\n assert_almost_equal(scale, np.sqrt(2))\n\n loc, scale = stats.norm.fit(x, floc=2)\n assert_equal(loc, 2)\n assert_equal(scale, np.sqrt(3))\n\n loc, scale = stats.norm.fit(x, fscale=2)\n assert_almost_equal(loc, 3)\n assert_equal(scale, 2)\n\n def test_fix_fit_gamma(self):\n x = np.arange(1, 6)\n meanlog = np.log(x).mean()\n\n # A basic test of gamma.fit with floc=0.\n floc = 0\n a, loc, scale = stats.gamma.fit(x, floc=floc)\n s = np.log(x.mean()) - meanlog\n assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # Regression tests for gh-2514.\n # The problem was that if `floc=0` was given, any other fixed\n # parameters were ignored.\n f0 = 1\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n f0 = 2\n floc = 0\n a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)\n assert_equal(a, f0)\n assert_equal(loc, floc)\n assert_almost_equal(scale, x.mean()/a, decimal=8)\n\n # loc and scale fixed.\n floc = 0\n fscale = 2\n a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)\n assert_equal(loc, floc)\n assert_equal(scale, fscale)\n c = meanlog - np.log(fscale)\n assert_almost_equal(special.digamma(a), c)\n\n def test_fix_fit_beta(self):\n # Test beta.fit when both floc and fscale are given.\n\n def mlefunc(a, b, x):\n # Zeros of this function are critical points of\n # the maximum likelihood function.\n n = len(x)\n s1 = np.log(x).sum()\n s2 = np.log(1-x).sum()\n psiab = special.psi(a + b)\n func = [s1 - n * (-psiab + special.psi(a)),\n s2 - n * (-psiab + special.psi(b))]\n return func\n\n # Basic test with floc and fscale given.\n x = 
np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)\n assert_equal(loc, 0)\n assert_equal(scale, 1)\n assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)\n\n # Basic test with f0, floc and fscale given.\n # This is also a regression test for gh-2514.\n x = np.array([0.125, 0.25, 0.5])\n a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)\n assert_equal(a, 2)\n assert_equal(loc, 0)\n assert_equal(scale, 1)\n da, db = mlefunc(a, b, x)\n assert_allclose(db, 0, atol=1e-5)\n\n # Same floc and fscale values as above, but reverse the data\n # and fix b (f1).\n x2 = 1 - x\n a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)\n assert_equal(b2, 2)\n assert_equal(loc2, 0)\n assert_equal(scale2, 1)\n da, db = mlefunc(a2, b2, x2)\n assert_allclose(da, 0, atol=1e-5)\n # a2 of this test should equal b from above.\n assert_almost_equal(a2, b)\n\n # Check for detection of data out of bounds when floc and fscale\n # are given.\n assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)\n y = np.array([0, .5, 1])\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)\n assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)\n\n # Check that attempting to fix all the parameters raises a ValueError.\n assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,\n floc=2, fscale=3)\n\n def test_expon_fit(self):\n x = np.array([2, 2, 4, 4, 4, 4, 4, 8])\n\n loc, scale = stats.expon.fit(x)\n assert_equal(loc, 2) # x.min()\n assert_equal(scale, 2) # x.mean() - x.min()\n\n loc, scale = stats.expon.fit(x, fscale=3)\n assert_equal(loc, 2) # x.min()\n assert_equal(scale, 3) # fscale\n\n loc, scale = stats.expon.fit(x, floc=0)\n assert_equal(loc, 0) # floc\n assert_equal(scale, 4) # x.mean() - loc\n\n def test_lognorm_fit(self):\n x = np.array([1.5, 3, 10, 15, 23, 59])\n lnxm1 = np.log(x - 1)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1)\n assert_allclose(shape, lnxm1.std(), rtol=1e-12)\n assert_equal(loc, 1)\n assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)\n assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),\n rtol=1e-12)\n assert_equal(loc, 1)\n assert_equal(scale, 6)\n\n shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)\n assert_equal(shape, 0.75)\n assert_equal(loc, 1)\n assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)\n\n def test_uniform_fit(self):\n x = np.array([1.0, 1.1, 1.2, 9.0])\n\n loc, scale = stats.uniform.fit(x)\n assert_equal(loc, x.min())\n assert_equal(scale, x.ptp())\n\n loc, scale = stats.uniform.fit(x, floc=0)\n assert_equal(loc, 0)\n assert_equal(scale, x.max())\n\n loc, scale = stats.uniform.fit(x, fscale=10)\n assert_equal(loc, 0)\n assert_equal(scale, 10)\n\n assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)\n assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)\n\n def test_fshapes(self):\n # take a beta distribution, with shapes='a, b', and make sure that\n # fa is equivalent to f0, and fb is equivalent to f1\n a, b = 3., 4.\n x = stats.beta.rvs(a, b, size=100, random_state=1234)\n res_1 = stats.beta.fit(x, f0=3.)\n res_2 = stats.beta.fit(x, fa=3.)\n assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)\n\n res_2 = stats.beta.fit(x, fix_a=3.)\n assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)\n\n res_3 = stats.beta.fit(x, f1=4.)\n res_4 = stats.beta.fit(x, fb=4.)\n assert_allclose(res_3, res_4, atol=1e-12, 
rtol=1e-12)\n\n res_4 = stats.beta.fit(x, fix_b=4.)\n assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)\n\n # cannot specify both positional and named args at the same time\n assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)\n\n # check that attempting to fix all parameters raises a ValueError\n assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,\n floc=2, fscale=3)\n\n # check that specifying floc, fscale and fshapes works for\n # beta and gamma which override the generic fit method\n res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)\n aa, bb, ll, ss = res_5\n assert_equal([aa, ll, ss], [3., 0, 1])\n\n # gamma distribution\n a = 3.\n data = stats.gamma.rvs(a, size=100)\n aa, ll, ss = stats.gamma.fit(data, fa=a)\n assert_equal(aa, a)\n\n def test_extra_params(self):\n # unknown parameters should raise rather than be silently ignored\n dist = stats.exponnorm\n data = dist.rvs(K=2, size=100)\n dct = dict(enikibeniki=-101)\n assert_raises(TypeError, dist.fit, data, **dct)\n\n\nclass TestFrozen(object):\n def setup_method(self):\n np.random.seed(1234)\n\n # Test that a frozen distribution gives the same results as the original\n # object.\n #\n # Only tested for the normal distribution (with loc and scale specified)\n # and for the gamma distribution (with a shape parameter specified).\n def test_norm(self):\n dist = stats.norm\n frozen = stats.norm(loc=10.0, scale=3.0)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2, loc=10.0, scale=3.0)\n assert_equal(result_f, result)\n\n assert_equal(frozen.a, dist.a)\n assert_equal(frozen.b, dist.b)\n\n def test_gamma(self):\n a = 2.0\n dist = stats.gamma\n frozen = stats.gamma(a)\n\n result_f = frozen.pdf(20.0)\n result = dist.pdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.cdf(20.0)\n result = dist.cdf(20.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.ppf(0.25)\n result = dist.ppf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.isf(0.25)\n result = dist.isf(0.25, a)\n assert_equal(result_f, result)\n\n result_f = frozen.sf(10.0)\n result = dist.sf(10.0, a)\n assert_equal(result_f, result)\n\n result_f = frozen.median()\n result = dist.median(a)\n assert_equal(result_f, result)\n\n result_f = frozen.mean()\n result = dist.mean(a)\n assert_equal(result_f, result)\n\n result_f = frozen.var()\n result = dist.var(a)\n assert_equal(result_f, result)\n\n result_f = frozen.std()\n result = dist.std(a)\n 
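# std, entropy and moment of the frozen gamma should match the corresponding unfrozen calls checked below.\n        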
assert_equal(result_f, result)\n\n result_f = frozen.entropy()\n result = dist.entropy(a)\n assert_equal(result_f, result)\n\n result_f = frozen.moment(2)\n result = dist.moment(2, a)\n assert_equal(result_f, result)\n\n assert_equal(frozen.a, frozen.dist.a)\n assert_equal(frozen.b, frozen.dist.b)\n\n def test_regression_ticket_1293(self):\n # Create a frozen distribution.\n frozen = stats.lognorm(1)\n # Call one of its methods that does not take any keyword arguments.\n m1 = frozen.moment(2)\n # Now call a method that takes a keyword argument.\n frozen.stats(moments='mvsk')\n # Call moment(2) again.\n # After calling stats(), the following was raising an exception.\n # So this test passes if the following does not raise an exception.\n m2 = frozen.moment(2)\n # The following should also be true, of course. But it is not\n # the focus of this test.\n assert_equal(m1, m2)\n\n def test_ab(self):\n # test that the support of a frozen distribution\n # (i) remains frozen even if it changes for the original one\n # (ii) is actually correct if the shape parameters are such that\n # the values of [a, b] are not the default [0, inf]\n # take a genpareto as an example where the support\n # depends on the value of the shape parameter:\n # for c > 0: a, b = 0, inf\n # for c < 0: a, b = 0, -1/c\n rv = stats.genpareto(c=-0.1)\n a, b = rv.dist.a, rv.dist.b\n assert_equal([a, b], [0., 10.])\n assert_equal([rv.a, rv.b], [0., 10.])\n\n stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b\n assert_equal([rv.dist.a, rv.dist.b], [a, b])\n assert_equal([rv.a, rv.b], [a, b])\n\n rv1 = stats.genpareto(c=0.1)\n assert_(rv1.dist is not rv.dist)\n\n def test_rv_frozen_in_namespace(self):\n # Regression test for gh-3522\n assert_(hasattr(stats.distributions, 'rv_frozen'))\n\n def test_random_state(self):\n # only check that the random_state attribute exists,\n frozen = stats.norm()\n assert_(hasattr(frozen, 'random_state'))\n\n # ... that it can be set,\n frozen.random_state = 42\n assert_equal(frozen.random_state.get_state(),\n np.random.RandomState(42).get_state())\n\n # ... 
and that .rvs method accepts it as an argument\n rndm = np.random.RandomState(1234)\n frozen.rvs(size=8, random_state=rndm)\n\n def test_pickling(self):\n # test that a frozen instance pickles and unpickles\n # (this method is a clone of common_tests.check_pickling)\n beta = stats.beta(2.3098496451481823, 0.62687954300963677)\n poiss = stats.poisson(3.)\n sample = stats.rv_discrete(values=([0, 1, 2, 3],\n [0.1, 0.2, 0.3, 0.4]))\n\n for distfn in [beta, poiss, sample]:\n distfn.random_state = 1234\n distfn.rvs(size=8)\n s = pickle.dumps(distfn)\n r0 = distfn.rvs(size=8)\n\n unpickled = pickle.loads(s)\n r1 = unpickled.rvs(size=8)\n assert_equal(r0, r1)\n\n # also smoke test some methods\n medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]\n assert_equal(medians[0], medians[1])\n assert_equal(distfn.cdf(medians[0]),\n unpickled.cdf(medians[1]))\n\n def test_expect(self):\n # smoke test the expect method of the frozen distribution\n # only take a gamma w/loc and scale and poisson with loc specified\n def func(x):\n return x\n\n gm = stats.gamma(a=2, loc=3, scale=4)\n gm_val = gm.expect(func, lb=1, ub=2, conditional=True)\n gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,\n lb=1, ub=2, conditional=True)\n assert_allclose(gm_val, gamma_val)\n\n p = stats.poisson(3, loc=4)\n p_val = p.expect(func)\n poisson_val = stats.poisson.expect(func, args=(3,), loc=4)\n assert_allclose(p_val, poisson_val)\n\n\nclass TestExpect(object):\n # Test for expect method.\n #\n # Uses normal distribution and beta distribution for finite bounds, and\n # hypergeom for discrete distribution with finite support\n def test_norm(self):\n v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)\n assert_almost_equal(v, 4, decimal=14)\n\n m = stats.norm.expect(lambda x: (x), loc=5, scale=2)\n assert_almost_equal(m, 5, decimal=14)\n\n lb = stats.norm.ppf(0.05, loc=5, scale=2)\n ub = stats.norm.ppf(0.95, loc=5, scale=2)\n prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)\n assert_almost_equal(prob90, 0.9, decimal=14)\n\n prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,\n conditional=True)\n assert_almost_equal(prob90c, 1., decimal=14)\n\n def test_beta(self):\n # case with finite support interval\n v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),\n loc=5, scale=2)\n assert_almost_equal(v, 1./18., decimal=13)\n\n m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)\n assert_almost_equal(m, 19/3., decimal=13)\n\n ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)\n lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)\n prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,\n scale=2., lb=lb, ub=ub, conditional=False)\n assert_almost_equal(prob90, 0.9, decimal=13)\n\n prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,\n scale=2, lb=lb, ub=ub, conditional=True)\n assert_almost_equal(prob90c, 1., decimal=13)\n\n def test_hypergeom(self):\n # test case with finite bounds\n\n # without specifying bounds\n m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)\n m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)\n assert_almost_equal(m, m_true, decimal=13)\n\n v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),\n loc=5.)\n assert_almost_equal(v, v_true, decimal=14)\n\n # with bounds, bounds equal to shifted support\n v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,\n args=(20, 10, 8),\n loc=5., lb=5, ub=13)\n assert_almost_equal(v_bounds, v_true, decimal=14)\n\n # drop boundary points\n 
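# hypergeom(M=20, n=10, N=8) shifted by loc=5 has support [5, 13], so lb=6, ub=12 below excludes both endpoints.\n        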
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()\n prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n loc=5., lb=6, ub=12)\n assert_almost_equal(prob_bounds, prob_true, decimal=13)\n\n # conditional\n prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,\n lb=6, ub=12, conditional=True)\n assert_almost_equal(prob_bc, 1, decimal=14)\n\n # check simple integral\n prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),\n lb=0, ub=8)\n assert_almost_equal(prob_b, 1, decimal=13)\n\n def test_poisson(self):\n # poisson, use lower bound only\n prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,\n conditional=False)\n prob_b_true = 1-stats.poisson.cdf(2, 2)\n assert_almost_equal(prob_bounds, prob_b_true, decimal=14)\n\n prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,\n conditional=True)\n assert_almost_equal(prob_lb, 1, decimal=14)\n\n def test_genhalflogistic(self):\n # genhalflogistic, changes upper bound of support in _argcheck\n # regression test for gh-2622\n halflog = stats.genhalflogistic\n # check consistency when calling expect twice with the same input\n res1 = halflog.expect(args=(1.5,))\n halflog.expect(args=(0.5,))\n res2 = halflog.expect(args=(1.5,))\n assert_almost_equal(res1, res2, decimal=14)\n\n def test_rice_overflow(self):\n # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows\n # check that using i0e fixes it\n assert_(np.isfinite(stats.rice.pdf(999, 0.74)))\n\n assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))\n assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))\n assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))\n\n def test_logser(self):\n # test a discrete distribution with infinite support and loc\n p, loc = 0.3, 3\n res_0 = stats.logser.expect(lambda k: k, args=(p,))\n # check against the correct answer (sum of a geom series)\n assert_allclose(res_0,\n p / (p - 1.) / np.log(1. - p), atol=1e-15)\n\n # now check it with `loc`\n res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)\n assert_allclose(res_l, res_0 + loc, atol=1e-15)\n\n def test_skellam(self):\n # Use a discrete distribution w/ bi-infinite support. 
Compute two first\n # moments and compare to known values (cf skellam.stats)\n p1, p2 = 18, 22\n m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))\n m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))\n assert_allclose(m1, p1 - p2, atol=1e-12)\n assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)\n\n def test_randint(self):\n # Use a discrete distribution w/ parameter-dependent support, which\n # is larger than the default chunksize\n lo, hi = 0, 113\n res = stats.randint.expect(lambda x: x, (lo, hi))\n assert_allclose(res,\n sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)\n\n def test_zipf(self):\n # Test that there is no infinite loop even if the sum diverges\n assert_warns(RuntimeWarning, stats.zipf.expect,\n lambda x: x**2, (2,))\n\n def test_discrete_kwds(self):\n # check that discrete expect accepts keywords to control the summation\n n0 = stats.poisson.expect(lambda x: 1, args=(2,))\n n1 = stats.poisson.expect(lambda x: 1, args=(2,),\n maxcount=1001, chunksize=32, tolerance=1e-8)\n assert_almost_equal(n0, n1, decimal=14)\n\n def test_moment(self):\n # test the .moment() method: compute a higher moment and compare to\n # a known value\n def poiss_moment5(mu):\n return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu\n\n for mu in [5, 7]:\n m5 = stats.poisson.moment(5, mu)\n assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)\n\n\nclass TestNct(object):\n def test_nc_parameter(self):\n # Parameter values c<=0 were not enabled (gh-2402).\n # For negative values c and for c=0 results of rv.cdf(0) below were nan\n rv = stats.nct(5, 0)\n assert_equal(rv.cdf(0), 0.5)\n rv = stats.nct(5, -1)\n assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)\n\n def test_broadcasting(self):\n res = stats.nct.pdf(5, np.arange(4, 7)[:, None],\n np.linspace(0.1, 1, 4))\n expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],\n [0.00217142, 0.00395366, 0.00683888, 0.01126276],\n [0.00153078, 0.00291093, 0.00525206, 0.00900815]])\n assert_allclose(res, expected, rtol=1e-5)\n\n def test_variance_gh_issue_2401(self):\n # Computation of the variance of a non-central t-distribution resulted\n # in a TypeError: ufunc 'isinf' not supported for the input types,\n # and the inputs could not be safely coerced to any supported types\n # according to the casting rule 'safe'\n rv = stats.nct(4, 0)\n assert_equal(rv.var(), 2.0)\n\n def test_nct_inf_moments(self):\n # n-th moment of nct only exists for df > n\n m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')\n assert_(np.isfinite(m))\n assert_equal([v, s, k], [np.inf, np.nan, np.nan])\n\n m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')\n assert_(np.isfinite([m, v, s]).all())\n assert_equal(k, np.nan)\n\n\nclass TestRice(object):\n def test_rice_zero_b(self):\n # rice distribution should work with b=0, cf gh-2164\n x = [0.2, 1., 5.]\n assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())\n assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())\n\n q = [0.1, 0.1, 0.5, 0.9]\n assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())\n\n mvsk = stats.rice.stats(0, moments='mvsk')\n assert_(np.isfinite(mvsk).all())\n\n # furthermore, pdf is continuous as b\\to 0\n # rice.pdf(x, b\\to 0) = x exp(-x^2/2) + O(b^2)\n # see e.g. 
Abramovich & Stegun 9.6.7 & 9.6.10\n b = 1e-8\n assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),\n atol=b, rtol=0)\n\n def test_rice_rvs(self):\n rvs = stats.rice.rvs\n assert_equal(rvs(b=3.).size, 1)\n assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))\n\n\nclass TestErlang(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_erlang_runtimewarning(self):\n # erlang should generate a RuntimeWarning if a non-integer\n # shape parameter is used.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n # The non-integer shape parameter 1.3 should trigger a\n # RuntimeWarning\n assert_raises(RuntimeWarning,\n stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)\n\n # Calling the fit method with `f0` set to an integer should\n # *not* trigger a RuntimeWarning. It should return the same\n # values as gamma.fit(...).\n data = [0.5, 1.0, 2.0, 4.0]\n result_erlang = stats.erlang.fit(data, f0=1)\n result_gamma = stats.gamma.fit(data, f0=1)\n assert_allclose(result_erlang, result_gamma, rtol=1e-3)\n\n\nclass TestRayleigh(object):\n # gh-6227\n def test_logpdf(self):\n y = stats.rayleigh.logpdf(50)\n assert_allclose(y, -1246.0879769945718)\n\n def test_logsf(self):\n y = stats.rayleigh.logsf(50)\n assert_allclose(y, -1250)\n\n\nclass TestExponWeib(object):\n\n def test_pdf_logpdf(self):\n # Regression test for gh-3508.\n x = 0.1\n a = 1.0\n c = 100.0\n p = stats.exponweib.pdf(x, a, c)\n logp = stats.exponweib.logpdf(x, a, c)\n # Expected values were computed with mpmath.\n assert_allclose([p, logp],\n [1.0000000000000054e-97, -223.35075402042244])\n\n def test_a_is_1(self):\n # For issue gh-3508.\n # Check that when a=1, the pdf and logpdf methods of exponweib are the\n # same as those of weibull_min.\n x = np.logspace(-4, -1, 4)\n a = 1\n c = 100\n\n p = stats.exponweib.pdf(x, a, c)\n expected = stats.weibull_min.pdf(x, c)\n assert_allclose(p, expected)\n\n logp = stats.exponweib.logpdf(x, a, c)\n expected = stats.weibull_min.logpdf(x, c)\n assert_allclose(logp, expected)\n\n def test_a_is_1_c_is_1(self):\n # When a = 1 and c = 1, the distribution is exponential.\n x = np.logspace(-8, 1, 10)\n a = 1\n c = 1\n\n p = stats.exponweib.pdf(x, a, c)\n expected = stats.expon.pdf(x)\n assert_allclose(p, expected)\n\n logp = stats.exponweib.logpdf(x, a, c)\n expected = stats.expon.logpdf(x)\n assert_allclose(logp, expected)\n\n\nclass TestWeibull(object):\n\n def test_logpdf(self):\n # gh-6217\n y = stats.weibull_min.logpdf(0, 1)\n assert_equal(y, 0)\n\n def test_with_maxima_distrib(self):\n # Tests for weibull_min and weibull_max.\n # The expected values were computed using the symbolic algebra\n # program 'maxima' with the package 'distrib', which has\n # 'pdf_weibull' and 'cdf_weibull'. 
The mapping between the\n # scipy and maxima functions is as follows:\n # -----------------------------------------------------------------\n # scipy maxima\n # --------------------------------- ------------------------------\n # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)\n # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))\n # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)\n # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))\n # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)\n # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))\n #\n # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)\n # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))\n # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)\n # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))\n # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)\n # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))\n # -----------------------------------------------------------------\n x = 1.5\n a = 2.0\n b = 3.0\n\n # weibull_min\n\n p = stats.weibull_min.pdf(x, a, scale=b)\n assert_allclose(p, np.exp(-0.25)/3)\n\n lp = stats.weibull_min.logpdf(x, a, scale=b)\n assert_allclose(lp, -0.25 - np.log(3))\n\n c = stats.weibull_min.cdf(x, a, scale=b)\n assert_allclose(c, -special.expm1(-0.25))\n\n lc = stats.weibull_min.logcdf(x, a, scale=b)\n assert_allclose(lc, np.log(-special.expm1(-0.25)))\n\n s = stats.weibull_min.sf(x, a, scale=b)\n assert_allclose(s, np.exp(-0.25))\n\n ls = stats.weibull_min.logsf(x, a, scale=b)\n assert_allclose(ls, -0.25)\n\n # Also test using a large value x, for which computing the survival\n # function using the CDF would result in 0.\n s = stats.weibull_min.sf(30, 2, scale=3)\n assert_allclose(s, np.exp(-100))\n\n ls = stats.weibull_min.logsf(30, 2, scale=3)\n assert_allclose(ls, -100)\n\n # weibull_max\n x = -1.5\n\n p = stats.weibull_max.pdf(x, a, scale=b)\n assert_allclose(p, np.exp(-0.25)/3)\n\n lp = stats.weibull_max.logpdf(x, a, scale=b)\n assert_allclose(lp, -0.25 - np.log(3))\n\n c = stats.weibull_max.cdf(x, a, scale=b)\n assert_allclose(c, np.exp(-0.25))\n\n lc = stats.weibull_max.logcdf(x, a, scale=b)\n assert_allclose(lc, -0.25)\n\n s = stats.weibull_max.sf(x, a, scale=b)\n assert_allclose(s, -special.expm1(-0.25))\n\n ls = stats.weibull_max.logsf(x, a, scale=b)\n assert_allclose(ls, np.log(-special.expm1(-0.25)))\n\n # Also test using a value of x close to 0, for which computing the\n # survival function using the CDF would result in 0.\n s = stats.weibull_max.sf(-1e-9, 2, scale=3)\n assert_allclose(s, -special.expm1(-1/9000000000000000000))\n\n ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)\n assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))\n\n\nclass TestRdist(object):\n @pytest.mark.slow\n def test_rdist_cdf_gh1285(self):\n # check workaround in rdist._cdf for issue gh-1285.\n distfn = stats.rdist\n values = [0.001, 0.5, 0.999]\n assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),\n values, decimal=5)\n\n\nclass TestTrapz(object):\n def test_reduces_to_triang(self):\n modes = [0, 0.3, 0.5, 1]\n for mode in modes:\n x = [0, mode, 1]\n assert_almost_equal(stats.trapz.pdf(x, mode, mode),\n stats.triang.pdf(x, mode))\n assert_almost_equal(stats.trapz.cdf(x, mode, mode),\n stats.triang.cdf(x, mode))\n\n def test_reduces_to_uniform(self):\n x = np.linspace(0, 1, 10)\n assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))\n assert_almost_equal(stats.trapz.cdf(x, 0, 1), 
stats.uniform.cdf(x))\n\n def test_cases(self):\n # edge cases\n assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)\n assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)\n assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),\n 1.11111111111111111)\n assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),\n 1.11111111111111111)\n\n # straightforward case\n assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)\n assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)\n assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)\n\n assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)\n assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)\n assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)\n assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)\n assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)\n\n def test_trapz_vect(self):\n # test that array-valued shapes and arguments are handled\n c = np.array([0.1, 0.2, 0.3])\n d = np.array([0.5, 0.6])[:, None]\n x = np.array([0.15, 0.25, 0.9])\n v = stats.trapz.pdf(x, c, d)\n\n cc, dd, xx = np.broadcast_arrays(c, d, x)\n\n res = np.empty(xx.size, dtype=xx.dtype)\n ind = np.arange(xx.size)\n for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):\n res[i] = stats.trapz.pdf(x1, c1, d1)\n\n assert_allclose(v, res.reshape(v.shape), atol=1e-15)\n\n\nclass TestTriang(object):\n def test_edge_cases(self):\n with np.errstate(all='raise'):\n assert_equal(stats.triang.pdf(0, 0), 2.)\n assert_equal(stats.triang.pdf(0.5, 0), 1.)\n assert_equal(stats.triang.pdf(1, 0), 0.)\n\n assert_equal(stats.triang.pdf(0, 1), 0)\n assert_equal(stats.triang.pdf(0.5, 1), 1.)\n assert_equal(stats.triang.pdf(1, 1), 2)\n\n assert_equal(stats.triang.cdf(0., 0.), 0.)\n assert_equal(stats.triang.cdf(0.5, 0.), 0.75)\n assert_equal(stats.triang.cdf(1.0, 0.), 1.0)\n\n assert_equal(stats.triang.cdf(0., 1.), 0.)\n assert_equal(stats.triang.cdf(0.5, 1.), 0.25)\n assert_equal(stats.triang.cdf(1., 1.), 1)\n\n\ndef test_540_567():\n # test for nan returned in tickets 540, 567\n assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,\n decimal=10, err_msg='test_540_567')\n assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,\n decimal=10, err_msg='test_540_567')\n assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,\n scale=0.204423758009),\n 0.98353464004309321,\n decimal=10, err_msg='test_540_567')\n\n\ndef test_regression_ticket_1316():\n # The following was raising an exception, because _construct_default_doc()\n # did not handle the default keyword extradoc=None. 
See ticket #1316.\n g = stats._continuous_distns.gamma_gen(name='gamma')\n\n\ndef test_regression_ticket_1326():\n # adjust to avoid nan with 0*log(0)\n assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)\n\n\ndef test_regression_tukey_lambda():\n # Make sure that Tukey-Lambda distribution correctly handles\n # non-positive lambdas.\n x = np.linspace(-5.0, 5.0, 101)\n\n olderr = np.seterr(divide='ignore')\n try:\n for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:\n p = stats.tukeylambda.pdf(x, lam)\n assert_((p != 0.0).all())\n assert_(~np.isnan(p).all())\n\n lam = np.array([[-1.0], [0.0], [2.0]])\n p = stats.tukeylambda.pdf(x, lam)\n finally:\n np.seterr(**olderr)\n\n assert_(~np.isnan(p).all())\n assert_((p[0] != 0.0).all())\n assert_((p[1] != 0.0).all())\n assert_((p[2] != 0.0).any())\n assert_((p[2] == 0.0).any())\n\n\[email protected](DOCSTRINGS_STRIPPED, reason=\"docstrings stripped\")\ndef test_regression_ticket_1421():\n assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)\n assert_('pmf(x,' in stats.poisson.__doc__)\n\n\ndef test_nan_arguments_gh_issue_1362():\n with np.errstate(invalid='ignore'):\n assert_(np.isnan(stats.t.logcdf(1, np.nan)))\n assert_(np.isnan(stats.t.cdf(1, np.nan)))\n assert_(np.isnan(stats.t.logsf(1, np.nan)))\n assert_(np.isnan(stats.t.sf(1, np.nan)))\n assert_(np.isnan(stats.t.pdf(1, np.nan)))\n assert_(np.isnan(stats.t.logpdf(1, np.nan)))\n assert_(np.isnan(stats.t.ppf(1, np.nan)))\n assert_(np.isnan(stats.t.isf(1, np.nan)))\n\n assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))\n assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))\n\n\ndef test_frozen_fit_ticket_1536():\n np.random.seed(5678)\n true = np.array([0.25, 0., 0.5])\n x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)\n\n olderr = np.seterr(divide='ignore')\n try:\n params = np.array(stats.lognorm.fit(x, floc=0.))\n finally:\n np.seterr(**olderr)\n\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))\n assert_almost_equal(params, true, decimal=2)\n\n params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))\n assert_almost_equal(params, true, decimal=2)\n\n np.random.seed(5678)\n loc = 1\n floc = 0.9\n x = stats.norm.rvs(loc, 2., size=100)\n params = np.array(stats.norm.fit(x, floc=floc))\n expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])\n assert_almost_equal(params, expected, decimal=4)\n\n\ndef test_regression_ticket_1530():\n # Check the starting value works for Cauchy distribution fit.\n np.random.seed(654321)\n rvs = stats.cauchy.rvs(size=100)\n params = stats.cauchy.fit(rvs)\n expected = (0.045, 1.142)\n assert_almost_equal(params, expected, decimal=1)\n\n\ndef test_gh_pr_4806():\n # Check starting values for Cauchy distribution fit.\n np.random.seed(1234)\n x = np.random.randn(42)\n for offset in 10000.0, 1222333444.0:\n loc, scale = stats.cauchy.fit(x + offset)\n assert_allclose(loc, offset, atol=1.0)\n assert_allclose(scale, 0.6, atol=1.0)\n\n\ndef test_tukeylambda_stats_ticket_1545():\n # Some test for the variance and kurtosis of the Tukey Lambda 
distr.\n # See test_tukeylamdba_stats.py for more tests.\n\n mv = stats.tukeylambda.stats(0, moments='mvsk')\n # Known exact values:\n expected = [0, np.pi**2/3, 0, 1.2]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(3.13, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]\n assert_almost_equal(mv, expected, decimal=10)\n\n mv = stats.tukeylambda.stats(0.14, moments='mvsk')\n # 'expected' computed with mpmath.\n expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]\n assert_almost_equal(mv, expected, decimal=10)\n\n\ndef test_poisson_logpmf_ticket_1436():\n assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))\n\n\ndef test_powerlaw_stats():\n \"\"\"Test the powerlaw stats function.\n\n This unit test is also a regression test for ticket 1548.\n\n The exact values are:\n mean:\n mu = a / (a + 1)\n variance:\n sigma**2 = a / ((a + 2) * (a + 1) ** 2)\n skewness:\n One formula (see https://en.wikipedia.org/wiki/Skewness) is\n gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3\n A short calculation shows that E[X**k] is a / (a + k), so gamma_1\n can be implemented as\n n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3\n d = sqrt(a/((a+2)*(a+1)**2)) ** 3\n gamma_1 = n/d\n Either by simplifying, or by a direct calculation of mu_3 / sigma**3,\n one gets the more concise formula:\n gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)\n kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)\n The excess kurtosis is\n gamma_2 = mu_4 / sigma**4 - 3\n A bit of calculus and algebra (sympy helps) shows that\n mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))\n so\n gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3\n which can be rearranged to\n gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))\n \"\"\"\n cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),\n (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]\n for a, exact_mvsk in cases:\n mvsk = stats.powerlaw.stats(a, moments=\"mvsk\")\n assert_array_almost_equal(mvsk, exact_mvsk)\n\n\ndef test_powerlaw_edge():\n # Regression test for gh-3986.\n p = stats.powerlaw.logpdf(0, 1)\n assert_equal(p, 0.0)\n\n\ndef test_exponpow_edge():\n # Regression test for gh-3982.\n p = stats.exponpow.logpdf(0, 1)\n assert_equal(p, 0.0)\n\n # Check pdf and logpdf at x = 0 for other values of b.\n p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])\n assert_equal(p, [np.inf, 1.0, 0.0])\n p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])\n assert_equal(p, [np.inf, 0.0, -np.inf])\n\n\ndef test_gengamma_edge():\n # Regression test for gh-3985.\n p = stats.gengamma.pdf(0, 1, 1)\n assert_equal(p, 1.0)\n\n # Regression tests for gh-4724.\n p = stats.gengamma._munp(-2, 200, 1.)\n assert_almost_equal(p, 1./199/198)\n\n p = stats.gengamma._munp(-2, 10, 1.)\n assert_almost_equal(p, 1./9/8)\n\n\ndef test_ksone_fit_freeze():\n # Regression test for ticket #1638.\n d = np.array(\n [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,\n -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,\n 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,\n 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,\n 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,\n 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,\n -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,\n -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,\n -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,\n -0.06037974, 
0.37670779, -0.21684405])\n\n try:\n olderr = np.seterr(invalid='ignore')\n with suppress_warnings() as sup:\n sup.filter(IntegrationWarning,\n \"The maximum number of subdivisions .50. has been \"\n \"achieved.\")\n sup.filter(RuntimeWarning,\n \"floating point number truncated to an integer\")\n stats.ksone.fit(d)\n finally:\n np.seterr(**olderr)\n\n\ndef test_norm_logcdf():\n # Test precision of the logcdf of the normal distribution.\n # This precision was enhanced in ticket 1614.\n x = -np.asarray(list(range(0, 120, 4)))\n # Values from R\n expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,\n -131.69539607, -203.91715537, -292.09872100, -396.25241451,\n -516.38564863, -652.50322759, -804.60844201, -972.70364403,\n -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,\n -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,\n -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,\n -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,\n -6277.63751711, -6733.67260303]\n\n assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)\n\n # also test the complex-valued code path\n assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)\n\n # test the accuracy: d(logcdf)/dx = pdf / cdf \\equiv exp(logpdf - logcdf)\n deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag\n deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))\n assert_allclose(deriv, deriv_expected, atol=1e-10)\n\n\ndef test_levy_cdf_ppf():\n # Test levy.cdf, including small arguments.\n x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])\n\n # Expected values were calculated separately with mpmath.\n # E.g.\n # >>> mpmath.mp.dps = 100\n # >>> x = mpmath.mp.mpf('0.01')\n # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))\n expected = np.array([0.9747728793699604,\n 0.3173105078629141,\n 0.1572992070502851,\n 0.0015654022580025495,\n 1.523970604832105e-23,\n 1.795832784800726e-219])\n\n y = stats.levy.cdf(x)\n assert_allclose(y, expected, rtol=1e-10)\n\n # ppf(expected) should get us back to x.\n xx = stats.levy.ppf(expected)\n assert_allclose(xx, x, rtol=1e-13)\n\n\ndef test_hypergeom_interval_1802():\n # these two had endless loops\n assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),\n (152.0, 197.0))\n assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),\n (152.0, 197.0))\n # this was working also before\n assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),\n (153.0, 196.0))\n\n # degenerate case .a == .b\n assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)\n assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)\n\n\ndef test_distribution_too_many_args():\n np.random.seed(1234)\n\n # Check that a TypeError is raised when too many args are given to a method\n # Regression test for ticket 1815.\n x = np.linspace(0.1, 0.7, num=5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)\n assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)\n assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, 
scale=0.5)\n\n # These should not give errors\n stats.gamma.pdf(x, 2, 3) # loc=3\n stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4\n stats.gamma.stats(2., 3)\n stats.gamma.stats(2., 3, 4)\n stats.gamma.stats(2., 3, 4, 'mv')\n stats.gamma.rvs(2., 3, 4, 5)\n stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)\n\n # Also for a discrete distribution\n stats.geom.pmf(x, 2, loc=3) # no error, loc=3\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)\n assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)\n\n # And for distributions with 0, 2 and 3 args respectively\n assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)\n assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)\n assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)\n stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale\n\n\ndef test_ncx2_tails_ticket_955():\n # Trac #955 -- check that the cdf computed by special functions\n # matches the integrated pdf\n a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)\n assert_allclose(a, b, rtol=1e-3, atol=0)\n\n\ndef test_ncx2_tails_pdf():\n # ncx2.pdf does not return nans in extreme tails(example from gh-1577)\n # NB: this is to check that nan_to_num is not needed in ncx2.pdf\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"divide by zero encountered in log\")\n assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)\n logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)\n\n assert_(np.isneginf(logval).all())\n\n\ndef test_foldnorm_zero():\n # Parameter value c=0 was not enabled, see gh-2399.\n rv = stats.foldnorm(0, scale=1)\n assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan\n\n\ndef test_stats_shapes_argcheck():\n # stats method was failing for vector shapes if some of the values\n # were outside of the allowed range, see gh-2678\n mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`\n mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)\n mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)\n assert_equal(mv2_augmented, mv3)\n\n # -1 is not a legal shape parameter\n mv3 = stats.lognorm.stats([2, 2.4, -1])\n mv2 = stats.lognorm.stats([2, 2.4])\n mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)\n assert_equal(mv2_augmented, mv3)\n\n # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.\n # stats method with multiple shape parameters is not properly vectorized\n # anyway, so some distributions may or may not fail.\n\n\n# Test subclassing distributions w/ explicit shapes\n\nclass _distr_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return 42\n\n\nclass _distr2_gen(stats.rv_continuous):\n def _cdf(self, x, a):\n return 42 * a + x\n\n\nclass _distr3_gen(stats.rv_continuous):\n def _pdf(self, x, a, b):\n return a + b\n\n def _cdf(self, x, a):\n # Different # of shape params from _pdf, to be able to check that\n # inspection catches the inconsistency.\"\"\"\n return 42 * a + x\n\n\nclass _distr6_gen(stats.rv_continuous):\n # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)\n def _pdf(self, x, a, b):\n return a*x + b\n\n def _cdf(self, x, a, b):\n return 42 * a + x\n\n\nclass TestSubclassingExplicitShapes(object):\n # Construct a distribution w/ explicit shapes parameter and test it.\n\n def test_correct_shapes(self):\n dummy_distr = 
_distr_gen(name='dummy', shapes='a')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_wrong_shapes_1(self):\n dummy_distr = _distr_gen(name='dummy', shapes='A')\n assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))\n\n def test_wrong_shapes_2(self):\n dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')\n dct = dict(a=1, b=2, c=3)\n assert_raises(TypeError, dummy_distr.pdf, 1, **dct)\n\n def test_shapes_string(self):\n # shapes must be a string\n dct = dict(name='dummy', shapes=42)\n assert_raises(TypeError, _distr_gen, **dct)\n\n def test_shapes_identifiers_1(self):\n # shapes must be a comma-separated list of valid python identifiers\n dct = dict(name='dummy', shapes='(!)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_2(self):\n dct = dict(name='dummy', shapes='4chan')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_3(self):\n dct = dict(name='dummy', shapes='m(fti)')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_identifiers_nodefaults(self):\n dct = dict(name='dummy', shapes='a=2')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_args(self):\n dct = dict(name='dummy', shapes='*args')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_kwargs(self):\n dct = dict(name='dummy', shapes='**kwargs')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_keywords(self):\n # python keywords cannot be used for shape parameters\n dct = dict(name='dummy', shapes='a, b, c, lambda')\n assert_raises(SyntaxError, _distr_gen, **dct)\n\n def test_shapes_signature(self):\n # test explicit shapes which agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a')\n assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)\n\n def test_shapes_signature_inconsistent(self):\n # test explicit shapes which do not agree w/ the signature of _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a):\n return stats.norm._pdf(x) * a\n\n dist = _dist_gen(shapes='a, b')\n assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))\n\n def test_star_args(self):\n # test _pdf with only starargs\n # NB: **kwargs of pdf will never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _dist_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)\n assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)\n assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))\n\n def test_star_args_2(self):\n # test _pdf with named & starargs\n # NB: **kwargs of pdf will never reach _pdf\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, offset, *args):\n extra_kwarg = args[0]\n return stats.norm._pdf(x) * extra_kwarg + offset\n\n dist = _dist_gen(shapes='offset, extra_kwarg')\n assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),\n stats.norm.pdf(0.5)*33 + 111)\n assert_equal(dist.pdf(0.5, 111, 33),\n stats.norm.pdf(0.5)*33 + 111)\n\n def test_extra_kwarg(self):\n # **kwargs to _pdf are ignored.\n # this is a limitation of the framework (_pdf(x, *goodargs))\n class _distr_gen(stats.rv_continuous):\n def _pdf(self, x, *args, **kwargs):\n # _pdf should handle *args, **kwargs itself. 
Here \"handling\"\n # is ignoring *args and looking for ``extra_kwarg`` and using\n # that.\n extra_kwarg = kwargs.pop('extra_kwarg', 1)\n return stats.norm._pdf(x) * extra_kwarg\n\n dist = _distr_gen(shapes='extra_kwarg')\n assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))\n\n def shapes_empty_string(self):\n # shapes='' is equivalent to shapes=None\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x):\n return stats.norm.pdf(x)\n\n dist = _dist_gen(shapes='')\n assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))\n\n\nclass TestSubclassingNoShapes(object):\n # Construct a distribution w/o explicit shapes parameter and test it.\n\n def test_only__pdf(self):\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.pdf(1, a=1), 42)\n\n def test_only__cdf(self):\n # _pdf is determined from _cdf by taking numerical derivative\n dummy_distr = _distr2_gen(name='dummy')\n assert_almost_equal(dummy_distr.pdf(1, a=1), 1)\n\n @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\n def test_signature_inspection(self):\n # check that _pdf signature inspection works correctly, and is used in\n # the class docstring\n dummy_distr = _distr_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 1)\n assert_equal(dummy_distr.shapes, 'a')\n res = re.findall(r'logpdf\\(x, a, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\n def test_signature_inspection_2args(self):\n # same for 2 shape params and both _pdf and _cdf defined\n dummy_distr = _distr6_gen(name='dummy')\n assert_equal(dummy_distr.numargs, 2)\n assert_equal(dummy_distr.shapes, 'a, b')\n res = re.findall(r'logpdf\\(x, a, b, loc=0, scale=1\\)',\n dummy_distr.__doc__)\n assert_(len(res) == 1)\n\n def test_signature_inspection_2args_incorrect_shapes(self):\n # both _pdf and _cdf defined, but shapes are inconsistent: raises\n assert_raises(TypeError, _distr3_gen, name='dummy')\n\n def test_defaults_raise(self):\n # default arguments should raise\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a=42):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def test_starargs_raise(self):\n # without explicit shapes, *args are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, *args):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n def test_kwargs_raise(self):\n # without explicit shapes, **kwargs are not allowed\n class _dist_gen(stats.rv_continuous):\n def _pdf(self, x, a, **kwargs):\n return 42\n assert_raises(TypeError, _dist_gen, **dict(name='dummy'))\n\n\[email protected](DOCSTRINGS_STRIPPED, reason=\"docstring stripped\")\ndef test_docstrings():\n badones = [r',\\s*,', r'\\(\\s*,', r'^\\s*:']\n for distname in stats.__all__:\n dist = getattr(stats, distname)\n if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):\n for regex in badones:\n assert_(re.search(regex, dist.__doc__) is None)\n\n\ndef test_infinite_input():\n assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)\n assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)\n\n\ndef test_lomax_accuracy():\n # regression test for gh-4033\n p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_gompertz_accuracy():\n # Regression test for gh-4031\n p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_truncexpon_accuracy():\n # regression test for gh-4035\n p = 
stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)\n assert_allclose(p, 1e-100)\n\n\ndef test_rayleigh_accuracy():\n # regression test for gh-4034\n p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)\n assert_almost_equal(p, 9.0, decimal=15)\n\n\ndef test_genextreme_give_no_warnings():\n \"\"\"regression test for gh-6219\"\"\"\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n p = stats.genextreme.cdf(.5, 0)\n p = stats.genextreme.pdf(.5, 0)\n p = stats.genextreme.ppf(.5, 0)\n p = stats.genextreme.logpdf(-np.inf, 0.0)\n number_of_warnings_thrown = len(w)\n assert_equal(number_of_warnings_thrown, 0)\n\n\ndef test_genextreme_entropy():\n # regression test for gh-5181\n euler_gamma = 0.5772156649015329\n\n h = stats.genextreme.entropy(-1.0)\n assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(0)\n assert_allclose(h, euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(1.0)\n assert_equal(h, 1)\n\n h = stats.genextreme.entropy(-2.0, scale=10)\n assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(10)\n assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)\n\n h = stats.genextreme.entropy(-10)\n assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)\n\n\ndef test_genextreme_sf_isf():\n # Expected values were computed using mpmath:\n #\n # import mpmath\n #\n # def mp_genextreme_sf(x, xi, mu=0, sigma=1):\n # # Formula from wikipedia, which has a sign convention for xi that\n # # is the opposite of scipy's shape parameter.\n # if xi != 0:\n # t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)\n # else:\n # t = mpmath.exp(-(x - mu)/sigma)\n # return 1 - mpmath.exp(-t)\n #\n # >>> mpmath.mp.dps = 1000\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"1e8\"), mpmath.mp.mpf(\"0.125\"))\n # >>> float(s)\n # 1.6777205262585625e-57\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"7.98\"), mpmath.mp.mpf(\"-0.125\"))\n # >>> float(s)\n # 1.52587890625e-21\n # >>> s = mp_genextreme_sf(mpmath.mp.mpf(\"7.98\"), mpmath.mp.mpf(\"0\"))\n # >>> float(s)\n # 0.00034218086528426593\n\n x = 1e8\n s = stats.genextreme.sf(x, -0.125)\n assert_allclose(s, 1.6777205262585625e-57)\n x2 = stats.genextreme.isf(s, -0.125)\n assert_allclose(x2, x)\n\n x = 7.98\n s = stats.genextreme.sf(x, 0.125)\n assert_allclose(s, 1.52587890625e-21)\n x2 = stats.genextreme.isf(s, 0.125)\n assert_allclose(x2, x)\n\n x = 7.98\n s = stats.genextreme.sf(x, 0)\n assert_allclose(s, 0.00034218086528426593)\n x2 = stats.genextreme.isf(s, 0)\n assert_allclose(x2, x)\n\n\ndef test_burr12_ppf_small_arg():\n prob = 1e-16\n quantile = stats.burr12.ppf(prob, 2, 3)\n # The expected quantile was computed using mpmath:\n # >>> import mpmath\n # >>> mpmath.mp.dps = 100\n # >>> prob = mpmath.mpf('1e-16')\n # >>> c = mpmath.mpf(2)\n # >>> d = mpmath.mpf(3)\n # >>> float(((1-prob)**(-1/d) - 1)**(1/c))\n # 5.7735026918962575e-09\n assert_allclose(quantile, 5.7735026918962575e-09)\n\n\ndef test_crystalball_function():\n \"\"\"\n All values are calculated using the independent implementation of the\n ROOT framework (see https://root.cern.ch/).\n Corresponding ROOT code is given in the comments.\n \"\"\"\n X = np.linspace(-5.0, 5.0, 21)[:-1]\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << \", \";\n calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)\n expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,\n 0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,\n 0.301023, 
0.265652, 0.18258, 0.097728, 0.0407391,\n 0.013226, 0.00334407, 0.000658486, 0.000100982,\n 1.20606e-05])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << \", \";\n calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)\n expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,\n 0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,\n 0.345928, 0.391987, 0.345928, 0.237752, 0.12726,\n 0.0530497, 0.0172227, 0.00435458, 0.000857469,\n 0.000131497, 1.57051e-05])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5) {\n # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);\n # std::cout << \", \";\n # }\n calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)\n expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,\n 0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,\n 0.172964, 0.189964, 0.195994, 0.189964, 0.172964,\n 0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,\n 0.0265249])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << \", \";\n calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)\n expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,\n 0.208663, 0.24344, 0.292128, 0.36516, 0.478254,\n 0.622723, 0.767192, 0.880286, 0.94959, 0.982834,\n 0.995314, 0.998981, 0.999824, 0.999976, 0.999997])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5)\n # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << \", \";\n calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)\n expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,\n 0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,\n 0.320592, 0.508717, 0.696841, 0.844111, 0.934357,\n 0.977646, 0.993899, 0.998674, 0.999771, 0.999969,\n 0.999997])\n assert_allclose(expected, calculated, rtol=0.001)\n\n # for(float x = -5.0; x < 5.0; x+=0.5) {\n # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);\n # std::cout << \", \";\n # }\n calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)\n expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,\n 0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,\n 0.411731, 0.508717, 0.605702, 0.696841, 0.777324,\n 0.844111, 0.896192, 0.934357, 0.960639, 0.977646])\n assert_allclose(expected, calculated, rtol=0.001)\n\n\ndef test_crystalball_function_moments():\n \"\"\"\n All values are calculated using the pdf formula and the integrate function\n of Mathematica\n \"\"\"\n # The Last two (alpha, n) pairs test the special case n == alpha**2\n beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])\n m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])\n\n # The distribution should be correctly normalised\n expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])\n calculated_0th_moment = stats.crystalball._munp(0, beta, m)\n assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)\n\n # calculated using wolframalpha.com\n # e.g. 
for beta = 2 and m = 3 we calculate the norm like this:\n # integrate exp(-x^2/2) from -2 to infinity +\n # integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2\n norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])\n\n a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])\n expected_1th_moment = a / norm\n calculated_1th_moment = stats.crystalball._munp(1, beta, m)\n assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])\n expected_2th_moment = a / norm\n calculated_2th_moment = stats.crystalball._munp(2, beta, m)\n assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])\n expected_3th_moment = a / norm\n calculated_3th_moment = stats.crystalball._munp(3, beta, m)\n assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])\n expected_4th_moment = a / norm\n calculated_4th_moment = stats.crystalball._munp(4, beta, m)\n assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)\n\n a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])\n expected_5th_moment = a / norm\n calculated_5th_moment = stats.crystalball._munp(5, beta, m)\n assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)\n\n\ndef test_argus_function():\n # There is no usable reference implementation.\n # (RootFit implementation returns unreasonable results which are not\n # normalized correctly.)\n # Instead we do some tests if the distribution behaves as expected for\n # different shapes and scales.\n for i in range(1, 10):\n for j in range(1, 10):\n assert_equal(stats.argus.pdf(i + 0.001, chi=j, scale=i), 0.0)\n assert_(stats.argus.pdf(i - 0.001, chi=j, scale=i) > 0.0)\n assert_equal(stats.argus.pdf(-0.001, chi=j, scale=i), 0.0)\n assert_(stats.argus.pdf(+0.001, chi=j, scale=i) > 0.0)\n\n for i in range(1, 10):\n assert_equal(stats.argus.cdf(1.0, chi=i), 1.0)\n assert_equal(stats.argus.cdf(1.0, chi=i),\n 1.0 - stats.argus.sf(1.0, chi=i))\n\n\nclass TestHistogram(object):\n def setup_method(self):\n np.random.seed(1234)\n\n # We have 8 bins\n # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)\n # But actually np.histogram will put the last 9 also in the [8,9) bin!\n # Therefore there is a slight difference below for the last bin, from\n # what you might have expected.\n histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,\n 6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)\n self.template = stats.rv_histogram(histogram)\n\n data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)\n norm_histogram = np.histogram(data, bins=50)\n self.norm_template = stats.rv_histogram(norm_histogram)\n\n def test_pdf(self):\n values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,\n 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])\n pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,\n 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,\n 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,\n 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,\n 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])\n assert_allclose(self.template.pdf(values), pdf_values)\n\n # Test explicitly the corner cases:\n # As stated above the pdf in the bin [8,9) is greater than\n # one would naively expect because np.histogram putted the 9\n # into the [8,9) bin.\n assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)\n assert_almost_equal(self.template.pdf(8.5), 
3.0/25.0)\n # 9 is outside our defined bins [8,9) hence the pdf is already 0\n # for a continuous distribution this is fine, because a single value\n # does not have a finite probability!\n assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)\n assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)\n\n x = np.linspace(-2, 2, 10)\n assert_allclose(self.norm_template.pdf(x),\n stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)\n\n def test_cdf_ppf(self):\n values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,\n 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])\n cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,\n 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,\n 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,\n 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,\n 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])\n assert_allclose(self.template.cdf(values), cdf_values)\n # First three and last two values in cdf_value are not unique\n assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])\n\n # Test of cdf and ppf are inverse functions\n x = np.linspace(1.0, 9.0, 100)\n assert_allclose(self.template.ppf(self.template.cdf(x)), x)\n x = np.linspace(0.0, 1.0, 100)\n assert_allclose(self.template.cdf(self.template.ppf(x)), x)\n\n x = np.linspace(-2, 2, 10)\n assert_allclose(self.norm_template.cdf(x),\n stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)\n\n def test_rvs(self):\n N = 10000\n sample = self.template.rvs(size=N, random_state=123)\n assert_equal(np.sum(sample < 1.0), 0.0)\n assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)\n assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)\n assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)\n assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)\n assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)\n assert_equal(np.sum(sample > 9.0), 0.0)\n\n def test_munp(self):\n for n in range(4):\n assert_allclose(self.norm_template._munp(n),\n stats.norm._munp(n, 1.0, 2.5), rtol=0.05)\n\n def test_entropy(self):\n assert_allclose(self.norm_template.entropy(),\n stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)\n"
] |
[
[
"scipy.stats.norm.ppf",
"scipy.stats.bernoulli.logpmf",
"numpy.sqrt",
"scipy.stats.crystalball.cdf",
"scipy.stats.hypergeom.logsf",
"numpy.all",
"scipy.stats.genpareto.ppf",
"scipy.stats.gengamma.pdf",
"scipy.stats.weibull_max.logsf",
"scipy.stats.norm.expect",
"scipy.stats.logser.pmf",
"numpy.where",
"numpy.exp",
"scipy.stats.geom.sf",
"scipy.stats.t.pdf",
"scipy.stats.halfnorm.pdf",
"scipy.stats.rayleigh.logpdf",
"scipy.stats.gamma.fit",
"scipy.stats.hypergeom.interval",
"scipy.stats.uniform.fit",
"scipy.stats.norm.fit",
"scipy.stats.skellam.sf",
"scipy.stats.weibull_min.sf",
"scipy.stats.nbinom.logpmf",
"scipy.stats.logser.rvs",
"scipy.stats.zipf",
"scipy.stats.laplace.pdf",
"numpy.log1p",
"scipy.stats.triang.pdf",
"scipy.stats.levy.cdf",
"scipy.stats.chi2.ppf",
"scipy.stats.lognorm.stats",
"scipy.stats.genpareto._argcheck",
"numpy.log",
"scipy.stats.exponweib.pdf",
"scipy.stats.truncexpon.cdf",
"scipy.stats.levy_stable._fitstart",
"scipy.stats.rice.logcdf",
"scipy.stats.poisson.logpmf",
"scipy.stats.beta",
"scipy.stats.gengamma._munp",
"scipy.stats.weibull_min.logsf",
"scipy.stats.norm.entropy",
"numpy.floor",
"scipy.stats.skellam.expect",
"scipy.stats.lomax.cdf",
"scipy.stats.kurtosis",
"scipy.integrate.simps",
"numpy.array",
"scipy.stats.levy_stable.cdf",
"numpy.sum",
"scipy.special.psi",
"scipy.stats.rice.expect",
"scipy.stats.t.logsf",
"scipy.stats.bernoulli.logsf",
"scipy.stats.halfgennorm.pdf",
"scipy.stats.betaprime",
"scipy.stats.chi2.pdf",
"scipy.stats.argus.sf",
"scipy.stats.hypergeom.rvs",
"scipy.stats.expon.cdf",
"scipy.stats.bradford.ppf",
"scipy.stats.logistic.cdf",
"scipy.stats.beta.rvs",
"scipy.stats.trapz.cdf",
"scipy.stats.gumbel_l.cdf",
"scipy.stats.norm.logcdf",
"numpy.testing.assert_array_equal",
"scipy.stats.genpareto.pdf",
"scipy.stats.gamma.rvs",
"scipy.stats.gumbel_r.cdf",
"scipy.stats.entropy",
"scipy.stats.hypergeom.stats",
"scipy.stats.gompertz.cdf",
"numpy.shape",
"scipy.stats.randint.pmf",
"scipy.stats.vonmises",
"scipy.stats.bernoulli.ppf",
"scipy.stats.beta.logpdf",
"scipy.stats.truncnorm.rvs",
"scipy.stats.bernoulli.isf",
"scipy.stats.geom.logsf",
"scipy.stats.tukeylambda.stats",
"scipy._lib._numpy_compat.suppress_warnings",
"scipy.stats.gamma",
"scipy.stats.ncf.pdf",
"scipy.stats.skewnorm.cdf",
"scipy.stats.poisson.moment",
"scipy.stats.pearson3",
"numpy.asarray",
"scipy.stats.skewnorm.sf",
"scipy.stats.t.stats",
"scipy.stats.rv_continuous",
"scipy.stats.genextreme.isf",
"scipy.stats.weibull_max.cdf",
"numpy.seterr",
"scipy.stats.weibull_max.sf",
"scipy.stats.pareto.stats",
"scipy.stats.f.stats",
"scipy.stats.exponpow.logpdf",
"scipy.stats.foldnorm",
"scipy.stats.gumbel_l.logsf",
"numpy.core.records.fromarrays",
"scipy.stats.genpareto.cdf",
"numpy.var",
"scipy.stats.nbinom",
"scipy.stats.zipf.stats",
"scipy.stats.cauchy.fit",
"scipy.stats.powerlaw.logpdf",
"scipy.stats.beta.expect",
"scipy.stats.weibull_min.cdf",
"scipy.stats.rice.pdf",
"scipy.stats.genpareto",
"scipy.stats.exponnorm.stats",
"scipy.stats.weibull_min.logcdf",
"scipy.stats.trapz.pdf",
"scipy.stats.ksone.fit",
"numpy.size",
"scipy.stats.skew",
"scipy.stats.poisson.interval",
"scipy.stats.uniform.cdf",
"scipy.stats.levy.ppf",
"scipy.stats.rice.stats",
"scipy.stats.geom.logcdf",
"scipy.stats.gumbel_l.isf",
"scipy.stats.bernoulli.logcdf",
"scipy.stats.binom.pmf",
"scipy.stats.uniform.ppf",
"scipy.stats.rayleigh.sf",
"scipy.stats.norm._pdf",
"scipy.stats.hypergeom.sf",
"scipy.stats.norm.rvs",
"scipy.stats.binom",
"numpy.testing.assert_",
"scipy.stats.poisson.pmf",
"numpy.errstate",
"scipy.stats.levy_stable.pdf",
"scipy.stats.exponpow.cdf",
"numpy.random.RandomState",
"numpy.testing.assert_warns",
"scipy.special.xlogy",
"scipy.stats.rayleigh.__doc__.lower",
"scipy.stats.kstest",
"scipy.stats._distn_infrastructure.argsreduce",
"scipy.stats.norm.pdf",
"scipy.stats.genextreme.logpdf",
"numpy.isposinf",
"scipy.stats.bernoulli.cdf",
"scipy.stats.skewnorm.rvs",
"numpy.ones",
"scipy.stats.hypergeom",
"scipy.stats.pearson3.pdf",
"scipy.stats.pearson3.rvs",
"scipy.stats.norm",
"scipy.stats.logistic.isf",
"scipy.stats.gamma.stats",
"scipy.stats.poisson",
"scipy.stats.weibull_max.logcdf",
"scipy.stats.bradford.cdf",
"scipy.stats.expon.fit",
"scipy.stats.skellam.cdf",
"scipy.stats.t.logpdf",
"scipy.stats.truncnorm.ppf",
"numpy.empty",
"scipy.stats.invgamma.sf",
"scipy.stats.hypergeom.expect",
"scipy.stats.planck.logsf",
"scipy.stats.norm.cdf",
"numpy.linspace",
"scipy.stats.geom.pmf",
"scipy.stats.lognorm.logsf",
"scipy.stats.t.ppf",
"scipy.stats.randint.cdf",
"scipy.stats.randint.expect",
"scipy.stats.t.isf",
"scipy.stats.genpareto.isf",
"scipy.stats.expon.pdf",
"scipy.stats.expon.sf",
"scipy.stats.bernoulli.rvs",
"scipy.stats.invgamma.cdf",
"scipy.stats.expon.ppf",
"scipy.stats.nbinom.rvs",
"numpy.mean",
"scipy.stats.norm._munp",
"scipy.stats.gamma.logpdf",
"numpy.histogram",
"scipy.stats.geom.rvs",
"numpy.testing.assert_equal",
"scipy.stats.gumbel_l.ppf",
"scipy.stats.tukeylambda.pdf",
"scipy.stats.pareto.sf",
"scipy.stats.genextreme.pdf",
"scipy.stats.exponpow.sf",
"scipy.special.digamma",
"scipy.stats.crystalball._munp",
"scipy.stats.beta.ppf",
"scipy.stats.t.cdf",
"scipy.stats.norminvgauss.pdf",
"scipy.stats.dlaplace",
"numpy.lib.recfunctions.rec_append_fields",
"scipy.stats.invgamma.ppf",
"numpy.testing.assert_array_almost_equal",
"scipy.stats.nbinom.pmf",
"numpy.logspace",
"scipy.stats.betaprime.cdf",
"scipy.stats.gumbel_l.logcdf",
"numpy.isnan",
"scipy.stats.lognorm.pdf",
"numpy.isneginf",
"scipy.stats.lognorm",
"numpy.testing.assert_allclose",
"scipy.stats.invgamma.isf",
"numpy.broadcast_arrays",
"scipy.stats.gumbel_l.sf",
"scipy.stats.exponpow.pdf",
"scipy.stats.erlang.fit",
"scipy.stats.logistic.ppf",
"scipy.stats.pearson3.cdf",
"scipy.stats.argus.pdf",
"scipy.stats.beta.fit",
"scipy.stats.exponnorm.pdf",
"scipy.stats.uniform.pdf",
"scipy.stats.t.sf",
"scipy.stats.bernoulli.__doc__.lower",
"scipy.stats.gennorm.pdf",
"scipy.stats.ncx2._cdf",
"scipy.stats.weibull_min.pdf",
"scipy.stats.norminvgauss.stats",
"scipy.stats.t.logcdf",
"scipy.stats.geom.logpmf",
"scipy.stats.hypergeom.cdf",
"scipy.stats.poisson.stats",
"scipy.stats.norminvgauss.ppf",
"scipy.special.expm1",
"scipy.stats.geom",
"scipy.stats.planck.sf",
"scipy.stats.levy_stable.stats",
"scipy.stats.poisson.expect",
"scipy.stats.exponweib.logpdf",
"scipy.stats.rice.cdf",
"scipy.stats.rayleigh.logsf",
"scipy.stats.genextreme.sf",
"scipy.stats.rv_histogram",
"scipy.stats.truncnorm.isf",
"scipy.stats.lognorm.rvs",
"numpy.random.randn",
"numpy.select",
"scipy.stats.binom.rvs",
"scipy.stats.invgamma.stats",
"scipy.stats.geom.ppf",
"scipy.stats.genextreme.cdf",
"scipy.stats.poisson.cdf",
"scipy.stats.gamma.expect",
"scipy.stats.bernoulli.sf",
"numpy.arange",
"scipy.stats.rice.logpdf",
"scipy.stats.powerlaw.stats",
"scipy.stats.cauchy.rvs",
"scipy.stats.skellam.pmf",
"scipy.stats._continuous_distns.gamma_gen",
"numpy.testing.assert_almost_equal",
"scipy.stats.kappa4.cdf",
"scipy.stats.logser",
"scipy.stats.randint.rvs",
"scipy.stats.nct",
"scipy.stats.t.std",
"scipy.stats.expon.logpdf",
"scipy.stats.randint",
"scipy.stats.rice.ppf",
"scipy.stats.skewnorm.stats",
"scipy.stats.hypergeom.ppf",
"scipy.stats.weibull_min.logpdf",
"scipy.stats.weibull_max.logpdf",
"scipy.stats.lognorm.fit",
"scipy.stats.hypergeom.pmf",
"scipy.stats.crystalball.pdf",
"scipy.stats.loggamma.stats",
"scipy.stats.skewnorm.pdf",
"scipy.stats.argus.cdf",
"scipy.stats.geom.cdf",
"scipy.stats.triang.cdf",
"scipy.stats.genextreme.entropy",
"scipy.stats.norminvgauss.cdf",
"scipy.stats.zipf.rvs",
"scipy.stats.genpareto.logsf",
"scipy.stats.weibull_max.pdf",
"scipy.stats.kappa4",
"scipy.stats.dlaplace.rvs",
"numpy.random.random",
"scipy.stats.nct.stats",
"numpy.random.seed",
"scipy.stats.logistic.sf",
"scipy.stats.poisson.rvs",
"numpy.isfinite",
"scipy.stats.lognorm.sf",
"scipy.stats.gamma.pdf",
"scipy.stats.bernoulli.pmf",
"numpy.abs",
"scipy.stats.uniform.sf",
"scipy.stats.norm.logpdf",
"scipy.stats.logser.mean",
"scipy.stats.burr12.ppf",
"scipy.stats.logser.expect",
"scipy.stats.genextreme.ppf",
"scipy.stats.rv_discrete",
"scipy.stats.bernoulli"
]
] |
FlowAnalysis/RL_MemoryAllocation
|
[
"e1145b3ad1ba0db6f6ca5260ff9e0c687e4881f2"
] |
[
"sim/a3c.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport tflearn\n\n\nGAMMA = 0.99\nA_DIM = 6\nENTROPY_WEIGHT = 0.5\nENTROPY_EPS = 1e-6\nS_INFO = 4\n\n\nclass ActorNetwork(object):\n \"\"\"\n Input to the network is the state, output is the distribution\n of all actions.\n \"\"\"\n def __init__(self, sess, state_dim, action_dim, learning_rate):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.lr_rate = learning_rate\n\n # Create the actor network\n self.inputs, self.out = self.create_actor_network()\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(self.network_params[idx].assign(param))\n\n # Selected action, 0-1 vector\n self.acts = tf.placeholder(tf.float32, [None, self.a_dim])\n\n # This gradient will be provided by the critic network\n self.act_grad_weights = tf.placeholder(tf.float32, [None, 1])\n\n # Compute the objective (log action_vector and entropy)\n self.obj = tf.reduce_sum(tf.multiply(\n tf.log(tf.reduce_sum(tf.multiply(self.out, self.acts),\n reduction_indices=1, keep_dims=True)),\n -self.act_grad_weights)) \\\n + ENTROPY_WEIGHT * tf.reduce_sum(tf.multiply(self.out,\n tf.log(self.out + ENTROPY_EPS)))\n\n # Combine the gradients here\n self.actor_gradients = tf.gradients(self.obj, self.network_params)\n\n # Optimization Op\n self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\\\n apply_gradients(zip(self.actor_gradients, self.network_params))\n\n def create_actor_network(self):\n with tf.variable_scope('actor'):\n inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])\n\n split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')\n split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')\n split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')\n split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')\n split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')\n split_5 = tflearn.fully_connected(inputs[:, 5:6, -1], 128, activation='relu')\n\n split_2_flat = tflearn.flatten(split_2)\n split_3_flat = tflearn.flatten(split_3)\n split_4_flat = tflearn.flatten(split_4)\n\n merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')\n\n dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')\n out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax')\n\n return inputs, out\n\n def train(self, inputs, acts, act_grad_weights):\n\n self.sess.run(self.optimize, feed_dict={\n self.inputs: inputs,\n self.acts: acts,\n self.act_grad_weights: act_grad_weights\n })\n\n def predict(self, inputs):\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs\n })\n\n def get_gradients(self, inputs, acts, act_grad_weights):\n return self.sess.run(self.actor_gradients, feed_dict={\n self.inputs: inputs,\n self.acts: acts,\n self.act_grad_weights: act_grad_weights\n })\n\n def apply_gradients(self, actor_gradients):\n return self.sess.run(self.optimize, feed_dict={\n i: d for i, d in zip(self.actor_gradients, actor_gradients)\n })\n\n def get_network_params(self):\n return 
self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n\nclass CriticNetwork(object):\n \"\"\"\n Input to the network is the state and action, output is V(s).\n On policy: the action must be obtained from the output of the Actor network.\n \"\"\"\n def __init__(self, sess, state_dim, learning_rate):\n self.sess = sess\n self.s_dim = state_dim\n self.lr_rate = learning_rate\n\n # Create the critic network\n self.inputs, self.out = self.create_critic_network()\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(self.network_params[idx].assign(param))\n\n # Network target V(s)\n self.td_target = tf.placeholder(tf.float32, [None, 1])\n\n # Temporal Difference, will also be weights for actor_gradients\n self.td = tf.subtract(self.td_target, self.out)\n\n # Mean square error\n self.loss = tflearn.mean_square(self.td_target, self.out)\n\n # Compute critic gradient\n self.critic_gradients = tf.gradients(self.loss, self.network_params)\n\n # Optimization Op\n self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\\\n apply_gradients(zip(self.critic_gradients, self.network_params))\n\n def create_critic_network(self):\n with tf.variable_scope('critic'):\n inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])\n\n split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')\n split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')\n split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')\n split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')\n split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')\n split_5 = tflearn.fully_connected(inputs[:, 5:6, -1], 128, activation='relu')\n\n split_2_flat = tflearn.flatten(split_2)\n split_3_flat = tflearn.flatten(split_3)\n split_4_flat = tflearn.flatten(split_4)\n\n merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')\n\n dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')\n out = tflearn.fully_connected(dense_net_0, 1, activation='linear')\n\n return inputs, out\n\n def train(self, inputs, td_target):\n return self.sess.run([self.loss, self.optimize], feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def predict(self, inputs):\n return self.sess.run(self.out, feed_dict={\n self.inputs: inputs\n })\n\n def get_td(self, inputs, td_target):\n return self.sess.run(self.td, feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def get_gradients(self, inputs, td_target):\n return self.sess.run(self.critic_gradients, feed_dict={\n self.inputs: inputs,\n self.td_target: td_target\n })\n\n def apply_gradients(self, critic_gradients):\n return self.sess.run(self.optimize, feed_dict={\n i: d for i, d in zip(self.critic_gradients, critic_gradients)\n })\n\n def get_network_params(self):\n return self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n 
self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n\ndef compute_gradients(s_batch, a_batch, r_batch, terminal, actor, critic):\n \"\"\"\n batch of s, a, r is from samples in a sequence\n the format is in np.array([batch_size, s/a/r_dim])\n terminal is True when sequence ends as a terminal state\n \"\"\"\n assert s_batch.shape[0] == a_batch.shape[0]\n assert s_batch.shape[0] == r_batch.shape[0]\n ba_size = s_batch.shape[0]\n\n v_batch = critic.predict(s_batch)\n\n R_batch = np.zeros(r_batch.shape)\n\n if terminal:\n R_batch[-1, 0] = 0 # terminal state\n else:\n R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state\n\n for t in reversed(xrange(ba_size - 1)):\n R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]\n\n td_batch = R_batch - v_batch\n\n actor_gradients = actor.get_gradients(s_batch, a_batch, td_batch)\n critic_gradients = critic.get_gradients(s_batch, R_batch)\n\n return actor_gradients, critic_gradients, td_batch\n\n\ndef discount(x, gamma):\n \"\"\"\n Given vector x, computes a vector y such that\n y[i] = x[i] + gamma * x[i+1] + gamma^2 x[i+2] + ...\n \"\"\"\n out = np.zeros(len(x))\n out[-1] = x[-1]\n for i in reversed(xrange(len(x)-1)):\n out[i] = x[i] + gamma*out[i+1]\n assert x.ndim >= 1\n # More efficient version:\n # scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]\n return out\n\n\ndef compute_entropy(x):\n \"\"\"\n Given vector x, computes the entropy\n H(x) = - sum( p * log(p))\n \"\"\"\n H = 0.0\n for i in xrange(len(x)):\n if 0 < x[i] < 1:\n H -= x[i] * np.log(x[i])\n return H\n\n\ndef build_summaries():\n td_loss = tf.Variable(0.)\n tf.summary.scalar(\"TD_loss\", td_loss)\n eps_total_reward = tf.Variable(0.)\n tf.summary.scalar(\"Eps_total_reward\", eps_total_reward)\n avg_entropy = tf.Variable(0.)\n tf.summary.scalar(\"Avg_entropy\", avg_entropy)\n\n summary_vars = [td_loss, eps_total_reward, avg_entropy]\n summary_ops = tf.summary.merge_all()\n\n return summary_ops, summary_vars\n"
] |
[
[
"numpy.log",
"tensorflow.multiply",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.subtract",
"tensorflow.summary.merge_all",
"tensorflow.log",
"tensorflow.variable_scope",
"numpy.zeros"
]
] |
duhnoo/FinalProject
|
[
"b28a16ad44fa4541381daa8168c3b9adf09bc5dd"
] |
[
"FinalProject.py"
] |
[
"'''\nDate: 04/28/2021\nSection: 3\nFile: D'AnnolfoFinalProject.py\nDescription:\nThis program uses StreamlitUI and other tools to create a dynamic web application that\nillustrates a heat map and bar chart based on a CSV dataset. This is the final project.\nI pledge that I have completed this programming assignment independently.\nI have not copied from a student or any source.\nI have not given my code to any student.\n'''\n\n# Import libraries\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport folium\nfrom folium.plugins import HeatMap\nfrom streamlit_folium import folium_static\n\n# Get Data Function with Default\ndef get_data(file=\"collisions.csv\"):\n csv = pd.read_csv(file)\n return csv\n\ndef bar_chart():\n\n # Initialize and Make Bar Chart\n earlyCount, morningCount, afternoonCount, eveningCount, nightCount = 0, 0, 0, 0, 0\n data['TIME'] = pd.to_datetime(data['TIME'], format='%H:%M')\n for time in data['TIME']:\n if 0 <= time.hour <= 5:\n earlyCount += 1\n elif 6 <= time.hour <= 11:\n morningCount += 1\n elif 12 <= time.hour <= 16:\n afternoonCount += 1\n elif 17 <= time.hour <= 20:\n eveningCount += 1\n elif 21 <= time.hour <= 23:\n nightCount += 1\n\n x = [\"Early Morning (12-5 AM)\", \"Morning Commute (6-11 AM)\", \"Afternoon (12-4 PM)\",\n \"Evening Rush Hour (5-8 PM)\", \"Night (9-11 PM)\"]\n y = [earlyCount, morningCount, afternoonCount, eveningCount, nightCount]\n plt.bar(x, y, color='red', edgecolor='cyan')\n plt.title(\"Time Period of Car Collisions in All 5 Boroughs\")\n plt.xticks(rotation=45, ha='right')\n plt.xlabel(\"Time Period\")\n plt.ylabel(\"Frequency\")\n\n return plt\n\n# Get data\ndata = get_data()\n\n# Titles and Sidebar\nst.title(\"NYC Traffic Collisions from Dec. 2016 - Feb. 2017\")\nst.sidebar.subheader(\"Data Inputs\")\n\n# Toggles heatmap and histogram selections\nshow_heatmap = st.sidebar.checkbox(\"Show Heatmap\")\nshow_barchart = st.sidebar.checkbox(\"Show Bar Chart\")\n\n# Filter Data\nst.write(f'Data Points: {len(data):,}')\n\nif show_heatmap:\n heatMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n\n # filter and remove NaNs\n dataFrame = data[[\"LATITUDE\", \"LONGITUDE\"]]\n dataFrame = dataFrame.dropna(axis=0, subset=['LATITUDE', 'LONGITUDE'])\n\n # List Comprehension to store lats and longs in a list\n heatMapList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in dataFrame.iterrows()]\n\n # Plot it\n HeatMap(heatMapList).add_to(heatMap)\n\n # Subheader and Streamlit\n st.subheader(\"Where do the most accidents occur in NYC's 5 Boroughs?\")\n folium_static(heatMap)\n\n # Heat Map Borough Filter\n st.sidebar.subheader(\"Heat Map Filters\")\n st.sidebar.write(\"By Borough\")\n boroughsList = data['BOROUGH'].drop_duplicates().dropna(axis='index', how='any').sort_values().to_list()\n borough_options = st.sidebar.radio('', boroughsList)\n st.markdown(f'**{borough_options}**')\n if borough_options == 'BRONX':\n newMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n newDF = data[['LATITUDE', 'LONGITUDE']].loc[data['BOROUGH'] == 'BRONX']\n newDF = newDF.dropna(axis=0, subset=['LATITUDE', 'LONGITUDE'])\n newList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in newDF.iterrows()]\n HeatMap(newList).add_to(newMap)\n folium_static(newMap)\n\n elif borough_options == 'BROOKLYN':\n newMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n newDF = data[['LATITUDE', 'LONGITUDE']].loc[data['BOROUGH'] == 'BROOKLYN']\n newDF = newDF.dropna(axis=0, subset=['LATITUDE', 
'LONGITUDE'])\n newList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in newDF.iterrows()]\n HeatMap(newList).add_to(newMap)\n folium_static(newMap)\n\n elif borough_options == 'MANHATTAN':\n newMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n newDF = data[['LATITUDE', 'LONGITUDE']].loc[data['BOROUGH'] == 'MANHATTAN']\n newDF = newDF.dropna(axis=0, subset=['LATITUDE', 'LONGITUDE'])\n newList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in newDF.iterrows()]\n HeatMap(newList).add_to(newMap)\n folium_static(newMap)\n\n elif borough_options == 'QUEENS':\n newMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n newDF = data[['LATITUDE', 'LONGITUDE']].loc[data['BOROUGH'] == 'QUEENS']\n newDF = newDF.dropna(axis=0, subset=['LATITUDE', 'LONGITUDE'])\n newList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in newDF.iterrows()]\n HeatMap(newList).add_to(newMap)\n folium_static(newMap)\n\n elif borough_options == 'STATEN ISLAND':\n newMap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)\n newDF = data[['LATITUDE', 'LONGITUDE']].loc[data['BOROUGH'] == 'STATEN ISLAND']\n newDF = newDF.dropna(axis=0, subset=['LATITUDE', 'LONGITUDE'])\n newList = [[row[\"LATITUDE\"], row[\"LONGITUDE\"]] for index, row in newDF.iterrows()]\n HeatMap(newList).add_to(newMap)\n folium_static(newMap)\n\n# Add Space between Charts\nst.text(\"\\n\\n\\n\\n\\n\")\n\n\n# uses get_data function passing in csv\ndata = get_data(\"collisions.csv\")\n\n# If show bar chart button is toggled, run it\nif show_barchart:\n \n # Show Filters and Title\n st.subheader(\"At what general time period (morning commute, late morning, afternoon, rush hour, evening) did the majority of these accidents occur?\")\n\n # Call in function\n st.pyplot(bar_chart())\n\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime",
"matplotlib.pyplot.title",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
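A minimal illustrative sketch (not part of the recorded file above) of the folium heat-map pattern it uses, with the five near-identical per-borough branches collapsed into one helper; the LATITUDE/LONGITUDE/BOROUGH column names follow collisions.csv, while the helper name is hypothetical:

import folium
import pandas as pd
from folium.plugins import HeatMap

def borough_heatmap(df: pd.DataFrame, borough: str = None) -> folium.Map:
    """Build a collision heat map, optionally restricted to one borough."""
    if borough is not None:
        df = df[df["BOROUGH"] == borough]
    points = df[["LATITUDE", "LONGITUDE"]].dropna().values.tolist()
    fmap = folium.Map(location=[40.7128, -74.0060], zoom_start=10)
    HeatMap(points).add_to(fmap)
    return fmap

# In the Streamlit app this would be rendered with
# streamlit_folium.folium_static(borough_heatmap(data, borough_options))
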
rit-ai/ritai-bot
|
[
"cf80d06d6778a83336281c9fd9aa79ec9fc94daa"
] |
[
"bot/skill/joke/deepjoke.py"
] |
[
"# TODO\n# http://arno.uvt.nl/show.cgi?fid=144631\n# https://arxiv.org/abs/1806.04510\n#\n# Code based on:\n# machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/\n\n\nimport os\nimport re\nimport sys\nimport keras\nimport pickle\nimport numpy as np\n\nSEQUENCE_LENGTH = 1 * 10**3\nMAX_LENGTH = 1 * 10**4\n\n# regular expression to remove control characters\nSTRIP_STRING = lambda s: re.sub('[\\0\\200-\\377]', '', s)\n\nclass Database():\n def __init__(self, sequences, out_shape):\n self.sequences = sequences\n self.out_shape = out_shape\n\ndef generate_database(fname):\n \n with open(fname + '.txt', 'r') as f:\n raw = f.read()\n raw = STRIP_STRING(raw)[:MAX_LENGTH]\n chars = sorted(list(set(raw)))\n int_chars = dict((c, i) for i, c in enumerate(chars))\n\n n_chars = len(raw)\n n_vocab = len(chars)\n\n print('Length: %d' % n_chars)\n print('Vocab: %d' % n_vocab)\n\n dataX = []\n dataY = []\n \n for i in range(0, len(raw) - SEQUENCE_LENGTH, 1):\n seq_in = raw[i:i+SEQUENCE_LENGTH]\n seq_out = raw[i + SEQUENCE_LENGTH]\n dataX.append([int_chars[char] for char in seq_in])\n dataY.append(int_chars[seq_out])\n\n n_patterns = len(dataX)\n print('Total patterns: %d' % n_patterns)\n\n sequences = np.reshape(dataX, (n_patterns, SEQUENCE_LENGTH, 1))\n sequences = sequences / float(n_vocab)\n\n out_shape = keras.utils.np_utils.to_categorical(dataY)\n\n db = Database(sequences, out_shape)\n with open('db.pkl', 'wb') as f:\n pickle.dump(db, f)\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('usage: python deepjoke.py [filename]')\n sys.exit(1)\n fname = sys.argv[1]\n generate_database(fname)\n\n"
] |
[
[
"numpy.reshape"
]
] |
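A NumPy-only sketch of the character-window encoding that generate_database performs in the file above (toy text and a hypothetical helper name; np.eye stands in for Keras' to_categorical):

import numpy as np

def char_windows(text, seq_len):
    # Integer id per character, as the int_chars mapping does in deepjoke.py.
    chars = sorted(set(text))
    int_chars = {c: i for i, c in enumerate(chars)}
    x, y = [], []
    for i in range(len(text) - seq_len):
        x.append([int_chars[c] for c in text[i:i + seq_len]])
        y.append(int_chars[text[i + seq_len]])
    # Shape (n_patterns, seq_len, 1), scaled by vocabulary size as in the file.
    x = np.reshape(x, (len(x), seq_len, 1)) / float(len(chars))
    y_onehot = np.eye(len(chars))[y]   # one-hot targets
    return x, y_onehot

x, y = char_windows("why did the chicken cross the road", 10)
print(x.shape, y.shape)
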
sgibson91/johnny-decimal
|
[
"07733e6b71ad0e2795d7a012c135db92112a2df1"
] |
[
"03-move-files.py"
] |
[
"import argparse\nimport glob\nimport os\nimport shutil\nimport sys\n\nimport pandas as pd\n\n\ndef parse_args(args: list):\n parser = argparse.ArgumentParser(\n description=\"Move files into a Johnny Decimal folder structure from a list of categorised files\"\n )\n\n parser.add_argument(\n \"target_path\",\n type=str,\n help=\"Path under which the Johnny Decimal structure exists\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n default=\"categorised-files.csv\",\n help=\"Input CSV file of categorised files\",\n )\n\n parser.add_argument(\n \"--purge\",\n action=\"store_true\",\n help=\"Delete files marked for removal\",\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n\n target_path = os.path.abspath(args.target_path)\n df = pd.read_csv(args.input)\n\n if args.purge:\n purge_df = df[df[\"delete\"] == True]\n\n for i, row in purge_df.iterrows():\n filepath = os.path.join(row[\"filepath\"], row[\"filename\"])\n if os.path.exists(filepath):\n print(f\"Deleting file: {filepath}\")\n os.remove(filepath)\n\n df = df[df[\"delete\"] == False]\n areas = sorted(df[\"area\"].dropna().unique().tolist())\n\n for area in areas:\n sub_df = df[df[\"area\"] == area]\n categories = sorted(sub_df[\"category\"].dropna().unique().tolist())\n\n for category in categories:\n cat_df = sub_df[sub_df[\"category\"] == category]\n\n pattern = os.path.join(\n target_path, f\"**{area.title()}**\", f\"**{category.title()}**\"\n )\n folders = glob.glob(pattern)\n\n if len(folders) > 1:\n print(\n f\"WARNING: Multiple matching folders found. Skipping this category: {category}\"\n )\n continue\n else:\n dest_path = folders[0]\n\n for i, row in cat_df.iterrows():\n source_name = os.path.join(row[\"filepath\"], row[\"filename\"])\n dest_name = os.path.join(dest_path, row[\"filename\"])\n\n shutil.move(source_name, dest_name)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv"
]
] |
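A small self-contained sketch of the purge / keep split that main() applies to the categorised-files CSV above, run on an in-memory frame with made-up rows:

import pandas as pd

df = pd.DataFrame({
    "filename": ["a.pdf", "b.png", "c.txt"],
    "area": ["10-19 Finance", "20-29 Admin", "10-19 Finance"],
    "category": ["11 Tax", "21 Letters", "12 Receipts"],
    "delete": [False, True, False],
})

purge_df = df[df["delete"] == True]    # rows whose files are deleted when --purge is set
keep_df = df[df["delete"] == False]    # rows whose files get moved into the folder tree
areas = sorted(keep_df["area"].dropna().unique().tolist())
print(purge_df["filename"].tolist())   # ['b.png']
print(areas)                           # ['10-19 Finance', '20-29 Admin']
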
lidingGK/AnyNet
|
[
"78a7e85ef482197b8668246d4f446f121f4c55ff"
] |
[
"models/cspn.py"
] |
[
"import torch.nn as nn\nimport torch\n\nimport torch.nn.functional as F\n\nclass Affinity_Propagate(nn.Module):\n\n def __init__(self,\n prop_time,\n prop_kernel=3,\n norm_type='8sum'):\n \"\"\"\n Inputs:\n prop_time: how many steps for CSPN to perform\n prop_kernel: the size of kernel (current only support 3x3)\n way to normalize affinity\n '8sum': normalize using 8 surrounding neighborhood\n '8sum_abs': normalization enforcing affinity to be positive\n This will lead the center affinity to be 0\n \"\"\"\n super(Affinity_Propagate, self).__init__()\n self.prop_time = prop_time\n self.prop_kernel = prop_kernel\n assert prop_kernel == 3, 'this version only support 8 (3x3 - 1) neighborhood'\n\n self.norm_type = norm_type\n assert norm_type in ['8sum', '8sum_abs']\n\n self.in_feature = 1\n self.out_feature = 1\n\n\n def forward(self, guidance, blur_depth, sparse_depth=None):\n\n self.sum_conv = nn.Conv3d(in_channels=8,\n out_channels=1,\n kernel_size=(1, 1, 1),\n stride=1,\n padding=0,\n bias=False)\n weight = torch.ones(1, 8, 1, 1, 1).cuda()\n self.sum_conv.weight = nn.Parameter(weight)\n for param in self.sum_conv.parameters():\n param.requires_grad = False\n\n gate_wb, gate_sum = self.affinity_normalization(guidance)\n\n # pad input and convert to 8 channel 3D features\n raw_depth_input = blur_depth\n\n #blur_depht_pad = nn.ZeroPad2d((1,1,1,1))\n result_depth = blur_depth\n\n if sparse_depth is not None:\n sparse_mask = sparse_depth.sign()\n\n for i in range(self.prop_time):\n # one propagation\n spn_kernel = self.prop_kernel\n result_depth = self.pad_blur_depth(result_depth)\n neigbor_weighted_sum = self.sum_conv(gate_wb * result_depth)\n neigbor_weighted_sum = neigbor_weighted_sum.squeeze(1)\n neigbor_weighted_sum = neigbor_weighted_sum[:, :, 1:-1, 1:-1]\n result_depth = neigbor_weighted_sum\n\n if '8sum' in self.norm_type:\n result_depth = (1.0 - gate_sum) * raw_depth_input + result_depth\n else:\n raise ValueError('unknown norm %s' % self.norm_type)\n\n if sparse_depth is not None:\n result_depth = (1 - sparse_mask) * result_depth + sparse_mask * raw_depth_input\n\n return result_depth\n\n def affinity_normalization(self, guidance):\n\n # normalize features\n if 'abs' in self.norm_type:\n guidance = torch.abs(guidance)\n\n gate1_wb_cmb = guidance.narrow(1, 0 , self.out_feature)\n gate2_wb_cmb = guidance.narrow(1, 1 * self.out_feature, self.out_feature)\n gate3_wb_cmb = guidance.narrow(1, 2 * self.out_feature, self.out_feature)\n gate4_wb_cmb = guidance.narrow(1, 3 * self.out_feature, self.out_feature)\n gate5_wb_cmb = guidance.narrow(1, 4 * self.out_feature, self.out_feature)\n gate6_wb_cmb = guidance.narrow(1, 5 * self.out_feature, self.out_feature)\n gate7_wb_cmb = guidance.narrow(1, 6 * self.out_feature, self.out_feature)\n gate8_wb_cmb = guidance.narrow(1, 7 * self.out_feature, self.out_feature)\n\n\n # top pad\n left_top_pad = nn.ZeroPad2d((0,2,0,2))\n gate1_wb_cmb = left_top_pad(gate1_wb_cmb).unsqueeze(1)\n\n center_top_pad = nn.ZeroPad2d((1,1,0,2))\n gate2_wb_cmb = center_top_pad(gate2_wb_cmb).unsqueeze(1)\n\n right_top_pad = nn.ZeroPad2d((2,0,0,2))\n gate3_wb_cmb = right_top_pad(gate3_wb_cmb).unsqueeze(1)\n\n # center pad\n left_center_pad = nn.ZeroPad2d((0,2,1,1))\n gate4_wb_cmb = left_center_pad(gate4_wb_cmb).unsqueeze(1)\n\n right_center_pad = nn.ZeroPad2d((2,0,1,1))\n gate5_wb_cmb = right_center_pad(gate5_wb_cmb).unsqueeze(1)\n\n # bottom pad\n left_bottom_pad = nn.ZeroPad2d((0,2,2,0))\n gate6_wb_cmb = left_bottom_pad(gate6_wb_cmb).unsqueeze(1)\n\n center_bottom_pad = 
nn.ZeroPad2d((1,1,2,0))\n gate7_wb_cmb = center_bottom_pad(gate7_wb_cmb).unsqueeze(1)\n\n right_bottm_pad = nn.ZeroPad2d((2,0,2,0))\n gate8_wb_cmb = right_bottm_pad(gate8_wb_cmb).unsqueeze(1)\n\n gate_wb = torch.cat((gate1_wb_cmb,gate2_wb_cmb,gate3_wb_cmb,gate4_wb_cmb,\n gate5_wb_cmb,gate6_wb_cmb,gate7_wb_cmb,gate8_wb_cmb), 1)\n\n # normalize affinity using their abs sum\n gate_wb_abs = torch.abs(gate_wb)\n abs_weight = self.sum_conv(gate_wb_abs)\n\n gate_wb = torch.div(gate_wb, abs_weight + 1e-15)\n gate_sum = self.sum_conv(gate_wb)\n\n gate_sum = gate_sum.squeeze(1)\n gate_sum = gate_sum[:, :, 1:-1, 1:-1]\n\n return gate_wb, gate_sum\n\n\n def pad_blur_depth(self, blur_depth):\n # top pad\n left_top_pad = nn.ZeroPad2d((0,2,0,2))\n blur_depth_1 = left_top_pad(blur_depth).unsqueeze(1)\n center_top_pad = nn.ZeroPad2d((1,1,0,2))\n blur_depth_2 = center_top_pad(blur_depth).unsqueeze(1)\n right_top_pad = nn.ZeroPad2d((2,0,0,2))\n blur_depth_3 = right_top_pad(blur_depth).unsqueeze(1)\n\n # center pad\n left_center_pad = nn.ZeroPad2d((0,2,1,1))\n blur_depth_4 = left_center_pad(blur_depth).unsqueeze(1)\n right_center_pad = nn.ZeroPad2d((2,0,1,1))\n blur_depth_5 = right_center_pad(blur_depth).unsqueeze(1)\n\n # bottom pad\n left_bottom_pad = nn.ZeroPad2d((0,2,2,0))\n blur_depth_6 = left_bottom_pad(blur_depth).unsqueeze(1)\n center_bottom_pad = nn.ZeroPad2d((1,1,2,0))\n blur_depth_7 = center_bottom_pad(blur_depth).unsqueeze(1)\n right_bottm_pad = nn.ZeroPad2d((2,0,2,0))\n blur_depth_8 = right_bottm_pad(blur_depth).unsqueeze(1)\n\n result_depth = torch.cat((blur_depth_1, blur_depth_2, blur_depth_3, blur_depth_4,\n blur_depth_5, blur_depth_6, blur_depth_7, blur_depth_8), 1)\n return result_depth\n\n\n def normalize_gate(self, guidance):\n gate1_x1_g1 = guidance.narrow(1,0,1)\n gate1_x1_g2 = guidance.narrow(1,1,1)\n gate1_x1_g1_abs = torch.abs(gate1_x1_g1)\n gate1_x1_g2_abs = torch.abs(gate1_x1_g2)\n elesum_gate1_x1 = torch.add(gate1_x1_g1_abs, gate1_x1_g2_abs)\n gate1_x1_g1_cmb = torch.div(gate1_x1_g1, elesum_gate1_x1)\n gate1_x1_g2_cmb = torch.div(gate1_x1_g2, elesum_gate1_x1)\n return gate1_x1_g1_cmb, gate1_x1_g2_cmb\n\n\n def max_of_4_tensor(self, element1, element2, element3, element4):\n max_element1_2 = torch.max(element1, element2)\n max_element3_4 = torch.max(element3, element4)\n return torch.max(max_element1_2, max_element3_4)\n\n def max_of_8_tensor(self, element1, element2, element3, element4, element5, element6, element7, element8):\n max_element1_2 = self.max_of_4_tensor(element1, element2, element3, element4)\n max_element3_4 = self.max_of_4_tensor(element5, element6, element7, element8)\n return torch.max(max_element1_2, max_element3_4)\n\nif __name__ == '__main__':\n \n import cv2\n\n net = Affinity_Propagate(10)\n\n fpath = '/workspace/depth/AnyNet/data/SceneFlow/monkaa/monkaa__frames_cleanpass/eating_camera2_x2/left/0000.png'\n \n img = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE) \n imgT = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).cuda()\n\n B, C, H, W = imgT.shape\n guidance = torch.zeros((B, 8, H, W )).cuda()\n guidance[:, 4:, :,:] = 0\n\n output = net(guidance, imgT)\n\n cv2.imshow('img', img)\n cv2.imshow('output', (output/255.0).squeeze().cpu().numpy())\n cv2.waitKey(0)\n\n"
] |
[
[
"torch.abs",
"torch.div",
"torch.nn.Parameter",
"torch.ones",
"torch.add",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.from_numpy",
"torch.nn.Conv3d",
"torch.nn.ZeroPad2d"
]
] |
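A toy sketch of the zero-padding trick used by pad_blur_depth and affinity_normalization above: each of the eight pad patterns shifts the map one pixel in a different direction, and stacking the shifted copies yields the 8-neighbour volume that the all-ones 1x1x1 sum convolution then reduces (CPU tensor, made-up size):

import torch
import torch.nn as nn

x = torch.arange(16.0).view(1, 1, 4, 4)              # (B, C, H, W)
pads = [(0, 2, 0, 2), (1, 1, 0, 2), (2, 0, 0, 2),    # top row of neighbours
        (0, 2, 1, 1),               (2, 0, 1, 1),    # left / right neighbours
        (0, 2, 2, 0), (1, 1, 2, 0), (2, 0, 2, 0)]    # bottom row of neighbours
shifted = torch.cat([nn.ZeroPad2d(p)(x).unsqueeze(1) for p in pads], dim=1)
print(shifted.shape)   # torch.Size([1, 8, 1, 6, 6]): eight shifted copies as one volume
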
Ji-Xinyou/DIP-proj-DepthEstimation
|
[
"5432c14ce1d0cdc9b8b6ab0a273678ffbe6086bd"
] |
[
"model/Res_Unet/modules.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torchvision.models as models\nimport torch.utils.model_zoo as model_zoo\nimport math\n\nclass Conv_Norm_ReLU(nn.Module):\n '''\n Conbination of Conv -> BN -> Leaky_ReLu\n Args:\n inp: inplane\n outp: outplane\n leaky_alpha: the negative slope of leaky relu\n '''\n \n def __init__(self, \n inp, \n outp, \n leaky_alpha=0.02, \n kernel_size=3, \n stride=1,\n padding=1):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=inp,\n out_channels=outp,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding)\n self.norm = nn.BatchNorm2d(outp)\n self.acti = nn.LeakyReLU(negative_slope=leaky_alpha,\n inplace=True)\n \n def forward(self, x):\n x = self.conv(x)\n x = self.norm(x)\n x = self.acti(x)\n return x\n\nclass DoubleConv(nn.Module):\n '''\n Twice of Conv -> BN -> leakyrelu\n Args:\n inp, midp, outp:\n conv_norm_acti_1: inp ----> midp\n conv_norm_acti_2: midp ----> outp\n leaky_alpha: the negative slope of leaky relu\n '''\n \n def __init__(self, \n inp, \n outp, \n midp=None, \n leaky_alpha=0.02,\n kernel_size=3, \n stride=1,\n padding=1):\n super().__init__()\n if not midp:\n midp= outp\n \n self.conv_norm_acti_1 = Conv_Norm_ReLU(inp=inp,\n outp=midp,\n leaky_alpha=leaky_alpha,\n kernel_size=kernel_size, \n stride=stride,\n padding=padding)\n self.conv_norm_acti_2 = Conv_Norm_ReLU(inp=midp,\n outp=outp,\n leaky_alpha=leaky_alpha,\n kernel_size=kernel_size, \n stride=stride,\n padding=padding)\n\n def forward(self, x):\n x = self.conv_norm_acti_1(x)\n x = self.conv_norm_acti_2(x)\n return x\n\nclass ResidualConv(nn.Module):\n \n def __init__(self,\n inp, \n outp, \n leaky_alpha=0.02,\n kernel_size=3, \n stride=1,\n padding=1):\n super().__init__()\n self.conv_norm_acti = Conv_Norm_ReLU(inp=inp,\n outp=outp,\n leaky_alpha=leaky_alpha,\n kernel_size=kernel_size, \n stride=stride,\n padding=padding)\n \n def forward(self, x):\n return x + self.conv_norm_acti(x)\n\nclass ResidualDoubleConv(nn.Module):\n \n def __init__(self,\n inp, \n outp, \n midp=None, \n leaky_alpha=0.02,\n kernel_size=3, \n stride=1,\n padding=1):\n super().__init__()\n self.doubleconv = DoubleConv(inp=inp,\n outp=outp,\n leaky_alpha=leaky_alpha,\n kernel_size=kernel_size, \n stride=stride,\n padding=padding)\n \n def forward(self, x):\n return x + self.doubleconv(x)\n \nclass DownSampling(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.pool = nn.MaxPool2d(2)\n self.conv = DoubleConv(in_channels, out_channels)\n\n def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n return x\n\n# this function is from https://github.com/milesial/Pytorch-UNet/\nclass UpSampling(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)\n else:\n self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels)\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if 
you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n \n\n# part of codes are from\n# https://github.com/JunjH/Revisiting_Single_Depth_Estimation/\n\nclass Bottleneck(nn.Module):\n '''\n Bottleneck is a resnet block\n '''\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=1)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x \n \ndef get_resnet50(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(url, 'pretrained_model/resnet50'))\n return model\n\nclass Encoder_resnet50(nn.Module):\n \n def __init__(self, base):\n super(Encoder_resnet50, self).__init__()\n # encoder is a pretrained resnet, inherit the architecture\n self.conv1 = base.conv1\n self.bn1 = base.bn1\n self.relu = base.relu\n self.maxpool = base.maxpool\n \n # layer1: out_channel = 64 * 4\n self.layer1 = base.layer1\n # layer2: out_channel = 128 * 4\n self.layer2 = base.layer2\n # layer3: out_channel = 256 * 4\n self.layer3 = base.layer3\n # layer4: out_channel = 512 * 4\n # layer4's output is the input of decoder\n self.layer4 = base.layer4\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n \n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n \n # return all for multiscale mff\n return x\n\ndef get_resnet50_encoder(**kwargs):\n base_resnet50 = get_resnet50(pretrained=True)\n # encoder output a tuple of each block's output\n E = Encoder_resnet50(base=base_resnet50)\n return E"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.functional.pad"
]
] |
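A minimal sketch of the pad-then-concatenate step that UpSampling.forward performs above when decoder and skip-connection feature maps differ in size (made-up shapes):

import torch
import torch.nn.functional as F

x1 = torch.randn(1, 64, 23, 23)   # upsampled decoder features
x2 = torch.randn(1, 64, 24, 24)   # skip connection from the encoder
diffY = x2.size(2) - x1.size(2)
diffX = x2.size(3) - x1.size(3)
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
merged = torch.cat([x2, x1], dim=1)
print(merged.shape)               # torch.Size([1, 128, 24, 24])
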
yangapku/models
|
[
"b50bc7b77288bcdaed676e70353310786c658d6e"
] |
[
"PaddleCV/ocr_recognition/train.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trainer for OCR CTC or attention model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport paddle.fluid as fluid\nfrom utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data\nfrom utility import check_gpu\nimport paddle.fluid.profiler as profiler\nfrom crnn_ctc_model import ctc_train_net\nfrom attention_model import attention_train_net\nimport data_reader\nimport argparse\nimport functools\nimport sys\nimport time\nimport os\nimport numpy as np\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('batch_size', int, 32, \"Minibatch size.\")\nadd_arg('total_step', int, 720000, \"The number of iterations. Zero or less means whole training set. More than 0 means the training set might be looped until # of iterations is reached.\")\nadd_arg('log_period', int, 1000, \"Log period.\")\nadd_arg('save_model_period', int, 15000, \"Save model period. '-1' means never saving the model.\")\nadd_arg('eval_period', int, 15000, \"Evaluate period. '-1' means never evaluating the model.\")\nadd_arg('save_model_dir', str, \"./models\", \"The directory the model to be saved to.\")\nadd_arg('train_images', str, None, \"The directory of images to be used for training.\")\nadd_arg('train_list', str, None, \"The list file of images to be used for training.\")\nadd_arg('test_images', str, None, \"The directory of images to be used for test.\")\nadd_arg('test_list', str, None, \"The list file of images to be used for training.\")\nadd_arg('model', str, \"crnn_ctc\", \"Which type of network to be used. 'crnn_ctc' or 'attention'\")\nadd_arg('init_model', str, None, \"The init model file of directory.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to train.\")\nadd_arg('min_average_window',int, 10000, \"Min average window.\")\nadd_arg('max_average_window',int, 12500, \"Max average window. 
It is proposed to be set as the number of minibatch in a pass.\")\nadd_arg('average_window', float, 0.15, \"Average window.\")\nadd_arg('parallel', bool, False, \"Whether use parallel training.\")\nadd_arg('profile', bool, False, \"Whether to use profiling.\")\nadd_arg('skip_batch_num', int, 0, \"The number of first minibatches to skip as warm-up for better performance test.\")\nadd_arg('skip_test', bool, False, \"Whether to skip test phase.\")\n# yapf: enable\n\n\ndef train(args):\n \"\"\"OCR training\"\"\"\n\n if args.model == \"crnn_ctc\":\n train_net = ctc_train_net\n get_feeder_data = get_ctc_feeder_data\n else:\n train_net = attention_train_net\n get_feeder_data = get_attention_feeder_data\n\n num_classes = None\n num_classes = data_reader.num_classes(\n ) if num_classes is None else num_classes\n data_shape = data_reader.data_shape()\n # define network\n sum_cost, error_evaluator, inference_program, model_average = train_net(\n args, data_shape, num_classes)\n\n # data reader\n train_reader = data_reader.train(\n args.batch_size,\n train_images_dir=args.train_images,\n train_list_file=args.train_list,\n cycle=args.total_step > 0,\n model=args.model)\n test_reader = data_reader.test(\n test_images_dir=args.test_images,\n test_list_file=args.test_list,\n model=args.model)\n\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n if 'ce_mode' in os.environ:\n fluid.default_startup_program().random_seed = 90\n\n exe.run(fluid.default_startup_program())\n\n # load init model\n if args.init_model is not None:\n model_dir = args.init_model\n model_file_name = None\n if not os.path.isdir(args.init_model):\n model_dir = os.path.dirname(args.init_model)\n model_file_name = os.path.basename(args.init_model)\n fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name)\n print(\"Init model from: %s.\" % args.init_model)\n\n train_exe = exe\n error_evaluator.reset(exe)\n if args.parallel:\n train_exe = fluid.ParallelExecutor(\n use_cuda=True if args.use_gpu else False, loss_name=sum_cost.name)\n\n fetch_vars = [sum_cost] + error_evaluator.metrics\n\n def train_one_batch(data):\n var_names = [var.name for var in fetch_vars]\n if args.parallel:\n results = train_exe.run(var_names,\n feed=get_feeder_data(data, place))\n results = [np.array(result).sum() for result in results]\n else:\n results = train_exe.run(feed=get_feeder_data(data, place),\n fetch_list=fetch_vars)\n results = [result[0] for result in results]\n return results\n\n def test(iter_num):\n error_evaluator.reset(exe)\n for data in test_reader():\n exe.run(inference_program, feed=get_feeder_data(data, place))\n _, test_seq_error = error_evaluator.eval(exe)\n print(\"\\nTime: %s; Iter[%d]; Test seq error: %s.\\n\" %\n (time.time(), iter_num, str(test_seq_error[0])))\n\n #Note: The following logs are special for CE monitoring.\n #Other situations do not need to care about these logs.\n print(\"kpis\ttest_acc\t%f\" % (1 - test_seq_error[0]))\n\n def save_model(args, exe, iter_num):\n filename = \"model_%05d\" % iter_num\n fluid.io.save_params(\n exe, dirname=args.save_model_dir, filename=filename)\n print(\"Saved model to: %s/%s.\" % (args.save_model_dir, filename))\n\n iter_num = 0\n stop = False\n start_time = time.time()\n while not stop:\n total_loss = 0.0\n total_seq_error = 0.0\n batch_times = []\n # train a pass\n for data in train_reader():\n if args.total_step > 0 and iter_num == args.total_step + args.skip_batch_num:\n stop = True\n break\n 
if iter_num < args.skip_batch_num:\n print(\"Warm-up iteration\")\n if iter_num == args.skip_batch_num:\n profiler.reset_profiler()\n start = time.time()\n results = train_one_batch(data)\n batch_time = time.time() - start\n fps = args.batch_size / batch_time\n batch_times.append(batch_time)\n total_loss += results[0]\n total_seq_error += results[2]\n\n iter_num += 1\n # training log\n if iter_num % args.log_period == 0:\n print(\"\\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f\"\n % (time.time(), iter_num,\n total_loss / (args.log_period * args.batch_size),\n total_seq_error / (args.log_period * args.batch_size)))\n print(\"kpis\ttrain_cost\t%f\" % (total_loss / (args.log_period *\n args.batch_size)))\n print(\"kpis\ttrain_acc\t%f\" % (\n 1 - total_seq_error / (args.log_period * args.batch_size)))\n total_loss = 0.0\n total_seq_error = 0.0\n\n # evaluate\n if not args.skip_test and iter_num % args.eval_period == 0:\n if model_average:\n with model_average.apply(exe):\n test(iter_num)\n else:\n test(iter_num)\n\n # save model\n if iter_num % args.save_model_period == 0:\n if model_average:\n with model_average.apply(exe):\n save_model(args, exe, iter_num)\n else:\n save_model(args, exe, iter_num)\n end_time = time.time()\n print(\"kpis\ttrain_duration\t%f\" % (end_time - start_time))\n # Postprocess benchmark data\n latencies = batch_times[args.skip_batch_num:]\n latency_avg = np.average(latencies)\n latency_pc99 = np.percentile(latencies, 99)\n fpses = np.divide(args.batch_size, latencies)\n fps_avg = np.average(fpses)\n fps_pc99 = np.percentile(fpses, 1)\n\n # Benchmark output\n print('\\nTotal examples (incl. warm-up): %d' %\n (iter_num * args.batch_size))\n print('average latency: %.5f s, 99pc latency: %.5f s' % (latency_avg,\n latency_pc99))\n print('average fps: %.5f, fps for 99pc latency: %.5f' % (fps_avg,\n fps_pc99))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n check_gpu(args.use_gpu)\n if args.profile:\n if args.use_gpu:\n with profiler.cuda_profiler(\"cuda_profiler.txt\", 'csv') as nvprof:\n train(args)\n else:\n with profiler.profiler(\"CPU\", sorted_key='total') as cpuprof:\n train(args)\n else:\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.divide",
"numpy.array",
"numpy.average",
"numpy.percentile"
]
] |
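The benchmark post-processing at the end of train() above is plain NumPy arithmetic; a standalone sketch with made-up batch timings:

import numpy as np

batch_size = 32
batch_times = [0.21, 0.20, 0.22, 0.35, 0.20, 0.19]   # seconds per minibatch (made up)
latency_avg = np.average(batch_times)
latency_pc99 = np.percentile(batch_times, 99)
fpses = np.divide(batch_size, batch_times)           # throughput of each minibatch
fps_avg, fps_pc99 = np.average(fpses), np.percentile(fpses, 1)
print('avg latency %.3f s, 99pc latency %.3f s, avg fps %.1f, fps at 99pc latency %.1f'
      % (latency_avg, latency_pc99, fps_avg, fps_pc99))
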
raynalee4/models
|
[
"ae524736fe2c2786394c8e63b0f81f8edc5e4395"
] |
[
"models/image_recognition/tensorflow/inception_resnet_v2/preprocessing.py"
] |
[
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n\n\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Image pre-processing utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.contrib.data.python.ops import interleave_ops\nfrom tensorflow.python.layers import utils\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.platform import gfile\n\n\ndef parse_example_proto(example_serialized):\n \"\"\"Parses an Example proto containing a training example of an image.\n\n The output of the build_image_data.py image preprocessing script is a dataset\n containing serialized Example protocol buffers. 
Each Example proto contains\n the following fields:\n\n image/height: 462\n image/width: 581\n image/colorspace: 'RGB'\n image/channels: 3\n image/class/label: 615\n image/class/synset: 'n03623198'\n image/class/text: 'knee pad'\n image/object/bbox/xmin: 0.1\n image/object/bbox/xmax: 0.9\n image/object/bbox/ymin: 0.2\n image/object/bbox/ymax: 0.6\n image/object/bbox/label: 615\n image/format: 'JPEG'\n image/filename: 'ILSVRC2012_val_00041207.JPEG'\n image/encoded: <JPEG encoded string>\n\n Args:\n example_serialized: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n\n Returns:\n image_buffer: Tensor tf.string containing the contents of a JPEG file.\n label: Tensor tf.int32 containing the label.\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n text: Tensor tf.string containing the human-readable label.\n \"\"\"\n # Dense features in Example proto.\n feature_map = {\n 'image/encoded': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,\n default_value=-1),\n 'image/class/text': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n }\n sparse_float32 = tf.VarLenFeature(dtype=tf.float32)\n # Sparse features in Example proto.\n feature_map.update(\n {k: sparse_float32 for k in ['image/object/bbox/xmin',\n 'image/object/bbox/ymin',\n 'image/object/bbox/xmax',\n 'image/object/bbox/ymax']})\n\n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n return features['image/encoded'], label, bbox, features['image/class/text']\n\n\ndef get_image_resize_method(resize_method, batch_position=0):\n \"\"\"Get tensorflow resize method.\n\n If resize_method is 'round_robin', return different methods based on batch\n position in a round-robin fashion. NOTE: If the batch size is not a multiple\n of the number of methods, then the distribution of methods will not be\n uniform.\n\n Args:\n resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.\n batch_position: position of the image in a batch. 
NOTE: this argument can\n be an integer or a tensor\n Returns:\n one of resize type defined in tf.image.ResizeMethod.\n \"\"\"\n resize_methods_map = {\n 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n 'bilinear': tf.image.ResizeMethod.BILINEAR,\n 'bicubic': tf.image.ResizeMethod.BICUBIC,\n 'area': tf.image.ResizeMethod.AREA\n }\n\n if resize_method != 'round_robin':\n return resize_methods_map[resize_method]\n\n # return a resize method based on batch position in a round-robin fashion.\n resize_methods = resize_methods_map.values()\n\n def lookup(index):\n return resize_methods[index]\n\n def resize_method_0():\n return utils.smart_cond(batch_position % len(resize_methods) == 0,\n lambda: lookup(0), resize_method_1)\n\n def resize_method_1():\n return utils.smart_cond(batch_position % len(resize_methods) == 1,\n lambda: lookup(1), resize_method_2)\n\n def resize_method_2():\n return utils.smart_cond(batch_position % len(resize_methods) == 2,\n lambda: lookup(2), lambda: lookup(3))\n\n # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here\n # because TF would not be able to construct a finite graph.\n\n return resize_method_0()\n\n\ndef decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32):\n \"\"\"Decode a JPEG string into one 3-D float image Tensor.\n\n Args:\n image_buffer: scalar string Tensor.\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor with values ranging from [0, 1).\n \"\"\"\n # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):\n # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):\n with tf.name_scope(scope or 'decode_jpeg'):\n # Decode the string as an RGB JPEG.\n # Note that the resulting image contains an unknown height and width\n # that is set dynamically by decode_jpeg. In other words, the height\n # and width of image is unknown at compile-time.\n image = tf.image.decode_jpeg(image_buffer, channels=3) # ,\n # fancy_upscaling=False,\n # dct_method='INTEGER_FAST')\n\n # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n return image\n\n\ndef preprocess_for_eval(image, height, width,\n central_fraction=0.875, scope=None):\n \"\"\"Prepare one image for evaluation.\n\n If height and width are specified it would output an image with that size by\n applying resize_bilinear.\n\n If central_fraction is specified it would crop the central fraction of the\n input image.\n\n Args:\n image: 3-D Tensor of image. 
If dtype is tf.float32 then the range should be\n [0, 1], otherwise it would converted to tf.float32 assuming that the range\n is [0, MAX], where MAX is largest positive representable number for\n int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).\n height: integer\n width: integer\n central_fraction: Optional Float, fraction of the image to crop.\n scope: Optional scope for name_scope.\n Returns:\n 3-D float Tensor of prepared image.\n \"\"\"\n with tf.name_scope(scope, 'eval_image', [image, height, width]):\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n if central_fraction:\n image = tf.image.central_crop(image, central_fraction=central_fraction)\n\n if height and width:\n # Resize the image to the specified height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [height, width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image\n\n\ndef apply_with_random_selector(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\n Args:\n x: input Tensor.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n return control_flow_ops.merge([\n func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)\n for case in range(num_cases)])[0]\n\n\ndef distort_color(image, color_ordering=0, fast_mode=True, scope=None):\n \"\"\"Distort the color of a Tensor image.\n\n Each color distortion is non-commutative and thus ordering of the color ops\n matters. Ideally we would randomly permute the ordering of the color ops.\n Rather then adding that level of complication, we select a distinct ordering\n of color ops for each preprocessing thread.\n\n Args:\n image: 3-D Tensor containing single image in [0, 1].\n color_ordering: Python int, a type of distortion (valid values: 0-3).\n fast_mode: Avoids slower ops (random_hue and random_contrast)\n scope: Optional scope for name_scope.\n Returns:\n 3-D Tensor color-distorted image on range [0, 1]\n Raises:\n ValueError: if color_ordering not in [0, 3]\n \"\"\"\n with tf.name_scope(scope, 'distort_color', [image]):\n if fast_mode:\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n else:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n else:\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, max_delta=32. 
/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n elif color_ordering == 2:\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n elif color_ordering == 3:\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n else:\n raise ValueError('color_ordering must be in [0, 3]')\n\n # The random_* ops do not necessarily clamp.\n return tf.clip_by_value(image, 0.0, 1.0)\n\n\ndef distorted_bounding_box_crop(image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100,\n scope=None):\n \"\"\"Generates cropped_image using a one of the bboxes randomly distorted.\n\n See `tf.image.sample_distorted_bounding_box` for more documentation.\n\n Args:\n image: 3-D Tensor of image (it will be converted to floats in [0, 1]).\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged\n as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole\n image.\n min_object_covered: An optional `float`. Defaults to `0.1`. The cropped\n area of the image must contain at least this fraction of any bounding box\n supplied.\n aspect_ratio_range: An optional list of `floats`. The cropped area of the\n image must have an aspect ratio = width / height within this range.\n area_range: An optional list of `floats`. The cropped area of the image\n must contain a fraction of the supplied image within in this range.\n max_attempts: An optional `int`. Number of attempts at generating a cropped\n region of the image of the specified constraints. After `max_attempts`\n failures, return the entire image.\n scope: Optional scope for name_scope.\n Returns:\n A tuple, a 3-D Tensor cropped_image and the distorted bbox\n \"\"\"\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n\n # A large fraction of image datasets contain a human-annotated bounding\n # box delineating the region of the image containing the object of interest.\n # We choose to create a new bounding box for the object which is a randomly\n # distorted version of the human-annotated bounding box that obeys an\n # allowed range of aspect ratios, sizes and overlap with the human-annotated\n # bounding box. 
If no box is supplied, then we assume the bounding box is\n # the entire image.\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bbox,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n return cropped_image, distort_bbox\n\n\ndef preprocess_for_train(image, height, width, bbox,\n batch_position,\n fast_mode=True,\n scope=None,\n add_image_summaries=True):\n \"\"\"Distort one image for training a network.\n\n Distorting images provides a useful technique for augmenting the data\n set during training in order to make the network invariant to aspects\n of the image that do not effect the label.\n\n Args:\n image: 3-D Tensor of image. If dtype is tf.float32 then the range should be\n [0, 1], otherwise it would converted to tf.float32 assuming that the range\n is [0, MAX], where MAX is largest positive representable number for\n int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).\n height: integer\n width: integer\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged\n as [ymin, xmin, ymax, xmax].\n batch_position: position of the image in a batch, which affects how images\n are distorted and resized. NOTE: this argument can be an integer or a\n tensor\n scope: Optional scope for op_scope.\n add_image_summaries: Enable image summaries.\n Returns:\n 3-D float Tensor of distorted image used for training with range [-1, 1].\n \"\"\"\n\n with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):\n if bbox is None:\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0],\n dtype=tf.float32,\n shape=[1, 1, 4])\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox)\n if add_image_summaries:\n tf.summary.image('image_with_bounding_boxes', image_with_box)\n\n distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)\n # Restore the shape since the dynamic slice based upon the bbox_size loses\n # the third dimension.\n distorted_image.set_shape([None, None, 3])\n image_with_distorted_box = tf.image.draw_bounding_boxes(\n tf.expand_dims(image, 0), distorted_bbox)\n if add_image_summaries:\n tf.summary.image('images_with_distorted_bounding_box',\n image_with_distorted_box)\n\n # This resizing operation may distort the images because the aspect\n # ratio is not respected. 
We select a resize method in a round robin\n # fashion based on the thread number.\n # Note that ResizeMethod contains 4 enumerated resizing methods.\n\n # We select only 1 case for fast_mode bilinear.\n num_resize_cases = 1 if fast_mode else 4\n distorted_image = apply_with_random_selector(\n distorted_image,\n lambda x, method: tf.image.resize_images(x, [height, width], method),\n num_cases=num_resize_cases)\n\n if add_image_summaries:\n tf.summary.image('cropped_resized_image',\n tf.expand_dims(distorted_image, 0))\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n # Randomly distort the colors. There are 1 or 4 ways to do it.\n num_distort_cases = 1 if fast_mode else 4\n distorted_image = apply_with_random_selector(\n distorted_image,\n lambda x, ordering: distort_color(x, ordering, fast_mode),\n num_cases=num_distort_cases)\n\n if add_image_summaries:\n tf.summary.image('final_distorted_image',\n tf.expand_dims(distorted_image, 0))\n distorted_image = tf.subtract(distorted_image, 0.5)\n distorted_image = tf.multiply(distorted_image, 2.0)\n return distorted_image\n\n\ndef distort_color(image, batch_position=0, distort_color_in_yiq=False,\n scope=None):\n \"\"\"Distort the color of the image.\n\n Each color distortion is non-commutative and thus ordering of the color ops\n matters. Ideally we would randomly permute the ordering of the color ops.\n Rather then adding that level of complication, we select a distinct ordering\n of color ops based on the position of the image in a batch.\n\n Args:\n image: float32 Tensor containing single image. Tensor values should be in\n range [0, 1].\n batch_position: the position of the image in a batch. NOTE: this argument\n can be an integer or a tensor\n distort_color_in_yiq: distort color of input images in YIQ space.\n scope: Optional scope for op_scope.\n Returns:\n color-distorted image\n \"\"\"\n with tf.name_scope(scope or 'distort_color'):\n def distort_fn_0(image=image):\n \"\"\"Variant 0 of distort function.\"\"\"\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n # if distort_color_in_yiq:\n # image = distort_image_ops.random_hsv_in_yiq(\n # image, lower_saturation=0.5, upper_saturation=1.5,\n # max_delta_hue=0.2 * math.pi)\n # else:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n return image\n\n def distort_fn_1(image=image):\n \"\"\"Variant 1 of distort function.\"\"\"\n image = tf.image.random_brightness(image, max_delta=32. 
/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n # if distort_color_in_yiq:\n # image = distort_image_ops.random_hsv_in_yiq(\n # image, lower_saturation=0.5, upper_saturation=1.5,\n # max_delta_hue=0.2 * math.pi)\n # else:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n return image\n\n image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,\n distort_fn_1)\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n return image\n\n\nclass RecordInputImagePreprocessor(object):\n \"\"\"Preprocessor for images with RecordInput format.\"\"\"\n\n def __init__(self,\n height,\n width,\n batch_size,\n num_splits,\n dtype,\n train,\n distortions=False,\n resize_method=\"bilinear\",\n shift_ratio=0,\n summary_verbosity=1,\n distort_color_in_yiq=False,\n fuse_decode_and_crop=False):\n self.height = height\n self.width = width\n self.batch_size = batch_size\n self.num_splits = num_splits\n self.dtype = dtype\n self.train = train\n self.resize_method = resize_method\n self.shift_ratio = shift_ratio\n self.distortions = distortions\n self.distort_color_in_yiq = distort_color_in_yiq\n self.fuse_decode_and_crop = fuse_decode_and_crop\n if self.batch_size % self.num_splits != 0:\n raise ValueError(\n ('batch_size must be a multiple of num_splits: '\n 'batch_size %d, num_splits: %d') %\n (self.batch_size, self.num_splits))\n self.batch_size_per_split = self.batch_size // self.num_splits\n self.summary_verbosity = summary_verbosity\n\n def image_preprocess(self, image_buffer, bbox, batch_position):\n \"\"\"Preprocessing image_buffer as a function of its batch position.\"\"\"\n if self.train:\n image_buffer = tf.image.decode_jpeg(\n image_buffer, channels=3, dct_method='INTEGER_FAST')\n image = preprocess_for_train(image_buffer, self.height, self.width, bbox,\n batch_position)\n else:\n image = tf.image.decode_jpeg(\n image_buffer, channels=3, dct_method='INTEGER_FAST')\n image = preprocess_for_eval(image, self.height, self.width)\n return image\n\n def parse_and_preprocess(self, value, batch_position):\n image_buffer, label_index, bbox, _ = parse_example_proto(value)\n image = self.image_preprocess(image_buffer, bbox, batch_position)\n return (label_index, image)\n\n def minibatch(self, dataset, subset, use_datasets, cache_data,\n shift_ratio=-1):\n if shift_ratio < 0:\n shift_ratio = self.shift_ratio\n with tf.name_scope('batch_processing'):\n # Build final results per split.\n images = [[] for _ in range(self.num_splits)]\n labels = [[] for _ in range(self.num_splits)]\n if use_datasets:\n glob_pattern = dataset.tf_record_pattern(subset)\n file_names = gfile.Glob(glob_pattern)\n if not file_names:\n raise ValueError('Found no files in --data_dir matching: {}'\n .format(glob_pattern))\n ds = tf.data.TFRecordDataset.list_files(file_names)\n ds = ds.apply(\n interleave_ops.parallel_interleave(\n tf.data.TFRecordDataset, cycle_length=10))\n if cache_data:\n ds = ds.take(1).cache().repeat()\n counter = tf.data.Dataset.range(self.batch_size)\n counter = counter.repeat()\n ds = tf.data.Dataset.zip((ds, counter))\n ds = ds.prefetch(buffer_size=self.batch_size)\n ds = ds.shuffle(buffer_size=10000)\n ds = ds.repeat()\n ds = ds.apply(\n batching.map_and_batch(\n map_func=self.parse_and_preprocess,\n batch_size=self.batch_size_per_split,\n num_parallel_batches=self.num_splits))\n ds = ds.prefetch(buffer_size=self.num_splits)\n ds_iterator = ds.make_one_shot_iterator()\n for d in 
xrange(self.num_splits):\n labels[d], images[d] = ds_iterator.get_next()\n\n else:\n record_input = data_flow_ops.RecordInput(\n file_pattern=dataset.tf_record_pattern(subset),\n seed=301,\n parallelism=64,\n buffer_size=10000,\n batch_size=self.batch_size,\n shift_ratio=shift_ratio,\n name='record_input')\n records = record_input.get_yield_op()\n records = tf.split(records, self.batch_size, 0)\n records = [tf.reshape(record, []) for record in records]\n for idx in xrange(self.batch_size):\n value = records[idx]\n (label, image) = self.parse_and_preprocess(value, idx)\n split_index = idx % self.num_splits\n labels[split_index].append(label)\n images[split_index].append(image)\n\n for split_index in xrange(self.num_splits):\n if not use_datasets:\n images[split_index] = tf.parallel_stack(images[split_index])\n labels[split_index] = tf.concat(labels[split_index], 0)\n images[split_index] = tf.cast(images[split_index], self.dtype)\n depth = 3\n images[split_index] = tf.reshape(\n images[split_index],\n shape=[self.batch_size_per_split, self.height, self.width, depth])\n labels[split_index] = tf.reshape(labels[split_index],\n [self.batch_size_per_split])\n return images, labels\n"
] |
[
[
"tensorflow.contrib.data.python.ops.batching.map_and_batch",
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.image.random_contrast",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.image.random_saturation",
"tensorflow.image.central_crop",
"tensorflow.image.random_hue",
"tensorflow.image.random_flip_left_right",
"tensorflow.summary.image",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.data.Dataset.zip",
"tensorflow.name_scope",
"tensorflow.parse_single_example",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize_bilinear",
"tensorflow.image.random_brightness",
"tensorflow.shape",
"tensorflow.image.resize_images",
"tensorflow.python.layers.utils.smart_cond",
"tensorflow.data.Dataset.range",
"tensorflow.VarLenFeature",
"tensorflow.split",
"tensorflow.parallel_stack",
"tensorflow.clip_by_value",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.expand_dims",
"tensorflow.data.TFRecordDataset.list_files",
"tensorflow.image.convert_image_dtype",
"tensorflow.contrib.data.python.ops.interleave_ops.parallel_interleave",
"tensorflow.random_uniform"
]
] |
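The round-robin resize-method selection in get_image_resize_method above reduces to a modulo lookup; a plain-Python sketch for an integer batch position (note that indexing resize_methods_map.values() directly, as the file does, would need a list() wrapper under Python 3):

resize_methods = ['nearest', 'bilinear', 'bicubic', 'area']   # insertion order of resize_methods_map

def round_robin_resize_method(batch_position):
    # Equivalent to the chained smart_cond branches when batch_position is an int.
    return resize_methods[batch_position % len(resize_methods)]

print([round_robin_resize_method(i) for i in range(6)])
# ['nearest', 'bilinear', 'bicubic', 'area', 'nearest', 'bilinear']
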
alfa-bravo/ekstrakto
|
[
"de2dc38815f9e8657b401fc71b9c54fa16b849c7"
] |
[
"ekstrakto/helpers.py"
] |
[
"\"\"\"\nAuthor: Vincent Brubaker-Gianakos\n\"\"\"\n\nimport numpy as np\nimport kdtree\n\n\ndef get_normalized_pixel_data(image, channel_bit_depth):\n return np.array(image.getdata()) / ((2 ** channel_bit_depth) - 1)\n\n\ndef progressive_peak_find(h, distinctness=1.0):\n array = sorted(np.ndenumerate(h), key=lambda a: a[1], reverse=True)\n peaks = dict()\n peaks[array[0][0]] = array[0][1]\n visited = kdtree.create([array[0][0]])\n for idx, value in array[1:]:\n nearest_visited, d = visited.search_nn(idx)\n d = d / h.shape[0]\n if d > distinctness:\n peaks[idx] = value\n visited.add(idx)\n ordered_peaks = sorted(peaks.items(), key=lambda p: p[1], reverse=True)\n coords, values = zip(*ordered_peaks)\n return coords, values\n\n\ndef peak_find_3d(pixels, n_bins=19, distinctness=1.0):\n bin_range = np.linspace(0, 1, n_bins)\n bins = (bin_range,) * 3\n H, edges = np.histogramdd(pixels, bins)\n coords, values = progressive_peak_find(H, distinctness)\n normalized_coords = np.array(coords) / (H.shape[0] - 1)\n return normalized_coords, np.array(values)\n\n\ndef normalized_histogram(pixels, n_bins=31, bias=0):\n bin_range = np.arange(-1, n_bins + 2) / n_bins\n bins = (bin_range,) * 3\n H, edges = np.histogramdd(pixels, bins)\n coords, values = zip(*list(np.ndenumerate(H)))\n return np.array(coords) / (H.shape[0] - 1), values\n\n\ndef rgb_to_hex_color(r, g, b):\n r, g, b = np.clip(r, 0, 255), np.clip(g, 0, 255), np.clip(b, 0, 255)\n return f'#{r:02X}{g:02X}{b:02X}'\n"
] |
[
[
"numpy.linspace",
"numpy.clip",
"numpy.arange",
"numpy.histogramdd",
"numpy.ndenumerate",
"numpy.array"
]
] |
BM-K/transformer_pytorch
|
[
"1ff84ae43a7134dc549312655c5058ec723ff44c"
] |
[
"translate.py"
] |
[
"''' Translate input text with trained model. '''\n\nimport torch\nimport argparse\nimport dill as pickle\nfrom tqdm import tqdm\n\nimport transformer.Constants as Constants\nfrom torchtext.data import Dataset\nfrom transformer.Models import Transformer\nfrom transformer.Translator import Translator\n\n# model 불러오기\ndef load_model(opt, device):\n\n checkpoint = torch.load(opt.model, map_location=device) # 모델 checkpoint에 저장\n model_opt = checkpoint['settings']\n\n # transformer parameter model에 저장\n model = Transformer(\n model_opt.src_vocab_size,\n model_opt.trg_vocab_size,\n\n model_opt.src_pad_idx,\n model_opt.trg_pad_idx,\n\n trg_emb_prj_weight_sharing=model_opt.proj_share_weight,\n emb_src_trg_weight_sharing=model_opt.embs_share_weight,\n d_k=model_opt.d_k,\n d_v=model_opt.d_v,\n d_model=model_opt.d_model,\n d_word_vec=model_opt.d_word_vec,\n d_inner=model_opt.d_inner_hid,\n n_layers=model_opt.n_layers,\n n_head=model_opt.n_head,\n dropout=model_opt.dropout).to(device)\n\n # transformer model에 불러온 model 설정\n model.load_state_dict(checkpoint['model'])\n print('[Info] Trained model state loaded.')\n return model \n\n# main 호출\ndef main():\n '''Main Function'''\n\n parser = argparse.ArgumentParser(description='translate.py')\n\n parser.add_argument('-model', required=True,\n help='Path to model weight file')\n parser.add_argument('-data_pkl', required=True, # input data 파일\n help='Pickle file with both instances and vocabulary.')\n parser.add_argument('-output', default='pred.txt', # output data 파일\n help=\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\")\n parser.add_argument('-beam_size', type=int, default=5)\n parser.add_argument('-max_seq_len', type=int, default=100)\n parser.add_argument('-no_cuda', action='store_true')\n\n # TODO: Translate bpe encoded files \n #parser.add_argument('-src', required=True,\n # help='Source sequence to decode (one line per sequence)')\n #parser.add_argument('-vocab', required=True,\n # help='Source sequence to decode (one line per sequence)')\n # TODO: Batch translation\n #parser.add_argument('-batch_size', type=int, default=30,\n # help='Batch size')\n #parser.add_argument('-n_best', type=int, default=1,\n # help=\"\"\"If verbose is set, will output the n_best\n # decoded sentences\"\"\")\n\n opt = parser.parse_args()\n opt.cuda = not opt.no_cuda\n\n data = pickle.load(open(opt.data_pkl, 'rb'))\n SRC, TRG = data['vocab']['src'], data['vocab']['trg']\n opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]\n opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]\n opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]\n opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]\n\n test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG}) # test dataset load / 정확히 이해는 안 됨\n \n device = torch.device('cuda' if opt.cuda else 'cpu') # cuda 불러오기\n translator = Translator(\n model=load_model(opt, device),\n beam_size=opt.beam_size,\n max_seq_len=opt.max_seq_len,\n src_pad_idx=opt.src_pad_idx,\n trg_pad_idx=opt.trg_pad_idx,\n trg_bos_idx=opt.trg_bos_idx,\n trg_eos_idx=opt.trg_eos_idx).to(device)\n\n unk_idx = SRC.vocab.stoi[SRC.unk_token]\n with open(opt.output, 'w') as f:\n for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):\n #print(' '.join(example.src))\n src_seq = [SRC.vocab.stoi.get(word, unk_idx) for word in example.src] # src sequence\n pred_seq = translator.translate_sentence(torch.LongTensor([src_seq]).to(device)) # src_seq 수치화 후 문장 번역\n pred_line = ' '.join(TRG.vocab.itos[idx] 
for idx in pred_seq) # seq -> line 합치기\n pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '') # BOS, EOS 공백 변환\n #print(pred_line)\n f.write(pred_line.strip() + '\\n') # 쓰기\n\n print('[Info] Finished.')\n\nif __name__ == \"__main__\":\n '''\n Usage: python translate.py -model trained.chkpt -data multi30k.pt -no_cuda\n '''\n main()\n"
] |
[
[
"torch.device",
"torch.LongTensor",
"torch.load"
]
] |
mwusdv/CarND-Traffic-Sign-Classifier-Project
|
[
"6ce73639ebebad26bb8f6d10ea2478d03ec0a7b8"
] |
[
"report_utils.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 15 00:03:21 2019\n\n@author: mingrui\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom experiment_pipeline import ExperimentParam\nimport utils\n\ndef hist_eq(img):\n # histogram equalization\n n_rows, n_cols, n_channels = img.shape\n o = np.copy(img)\n for c in range(n_channels):\n o[:, :, c] = cv2.equalizeHist(img[:, :, c])\n \n return o\n\ndef show_hist_eq():\n X, y = utils.load_data('./data/train.p')\n \n img1 = X[1000]\n o1 = np.copy(img1)\n for c in range(3):\n o1[:, :, c] = cv2.equalizeHist(img1[:, :, c])\n \n img2 = X[1100]\n o2 = np.copy(img2)\n for c in range(3):\n o2[:, :, c] = cv2.equalizeHist(img2[:, :, c])\n \n \n imgs = [img1, o1, img2, o2]\n titles = ['original', 'pre-processed', 'original', 'pre-processed']\n n_rows = 2\n n_cols = 2\n img_size = 2\n \n hsize = img_size * n_rows\n vsize = img_size * n_cols\n fig = plt.figure(figsize=(hsize,vsize))\n plt.subplots_adjust(wspace=0.1, hspace=0.15)\n for i in range(4):\n sub = fig.add_subplot(n_rows, n_cols, i+1)\n sub.set_aspect('auto')\n sub.set_title(titles[i])\n sub.imshow(imgs[i])\n \n sub.axis('off')\n \n plt.show()\n \ndef show_augment(n_classes, n_each):\n X_train, y_train = utils.load_data('./data/train.p')\n param = ExperimentParam(n_rows=32, n_cols=32, n_channels=3, n_classes=43)\n param._affine_aug_ratio = 1.0\n X_aug, y_aug = utils.augment_data_affine(X_train, y_train, param)\n \n X_train = np.array([hist_eq(X_train[i]) for i in range(len(X_train))])\n X_aug = np.array([hist_eq(X_aug[i]) for i in range(len(X_aug))])\n \n class_indices = np.random.choice(range(43), n_classes, replace=False)\n img_size = 3\n hsize = img_size * n_each\n vsize = img_size * n_classes * 2\n fig = plt.figure(figsize=(hsize,vsize))\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n count = 0\n for c in class_indices:\n for i in range(2):\n if i == 0:\n X = X_train\n y = y_train\n else:\n X = X_aug\n y = y_aug\n \n img_indices = np.where(y==c)[0]\n indices = np.random.choice(img_indices, n_each, replace=False)\n for idx in indices:\n count += 1\n sub = fig.add_subplot(n_classes*2, n_each, count)\n sub.set_aspect('auto')\n \n sub.imshow(X[idx])\n sub.text(2,2,str(y[idx]), color='k',backgroundcolor='w')\n \n sub.axis('off')\n \n \n plt.show()\n\nif __name__ == '__main__':\n show_augment(3, 5)"
] |
[
[
"numpy.random.choice",
"numpy.copy",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.figure"
]
] |
gchhablani/MLRC-2020-Towards-Interpreting-BERT-for-RCQA
|
[
"5ffc36f3b03ab0fe64fe5e6fffeddc9d3afe2baf"
] |
[
"run_integrated_gradients.py"
] |
[
"\"\"\"\nScript to run integrated gradients on SQuAD or DuoRC\n\nThis script uses datasets, captum, omegaconf and transformers libraries.\nPlease install them in order to run this script.\n\nUsage:\n $python run_integrated_gradients.py --config ./configs/integrated_gradients/squad.yaml\n\n\"\"\"\nimport os\nimport argparse\nimport pickle as pkl\nimport pandas as pd\n\nfrom omegaconf import OmegaConf\n\n# from transformers import BertTokenizer, BertForQuestionAnswering\n\nfrom src.utils.integrated_gradients import BertIntegratedGradients\n\n# from src.utils.misc import seed\n\ndirname = os.path.dirname(__file__)\n## Config\nparser = argparse.ArgumentParser(\n prog=\"run_integrated_gradients.py\",\n description=\"Run integrated gradients on a model.\",\n)\nparser.add_argument(\n \"--config\",\n type=str,\n action=\"store\",\n help=\"The configuration for integrated gradients\",\n default=os.path.join(dirname, \"./configs/integrated_gradients/squad.yaml\"),\n)\n\nargs = parser.parse_args()\nig_config = OmegaConf.load(args.config)\n\n# Load dataset\nprint(\"### Loading Dataset ###\")\npredictions = pd.read_json(ig_config.predictions_path)\n\n# Initialize BertIntegratedGradients\nbig = BertIntegratedGradients(ig_config, predictions)\n\nprint(\"### Running IG ###\")\n(\n samples,\n word_importances,\n token_importances,\n) = big.get_random_samples_and_importances_across_all_layers(\n n_samples=ig_config.n_samples\n)\n\nprint(\"### Saving the Scores ###\")\nwith open(os.path.join(ig_config.store_dir, \"samples\"), \"wb\") as out_file:\n pkl.dump(samples, out_file)\nwith open(os.path.join(ig_config.store_dir, \"token_importances\"), \"wb\") as out_file:\n pkl.dump(token_importances, out_file)\nwith open(os.path.join(ig_config.store_dir, \"word_importances\"), \"wb\") as out_file:\n pkl.dump(word_importances, out_file)\n\nprint(\"### Finished ###\")\n"
] |
[
[
"pandas.read_json"
]
] |
clarkyu2016/machine-learning-nanodegree
|
[
"44d4afe83d57f6fabd1ec6db1db173eae94a8869"
] |
[
"Reports/P6 Capstone/Rossmann Sales/data_processing.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\n\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\nstore = pd.read_csv('store.csv')\n\n#1、去除销量中的极值,由于无法估计极值对结果的影响,所以拟合模型的时候可以进行两次,去除极值和未去除极值\n#再试一下标准差标准误差\ndef rm_outliers(df): \n q1 = np.percentile(df['Sales'], 25, axis=0)\n q3 = np.percentile(df['Sales'], 75, axis=0)\n k = 2.5\n iqr = q3 - q1\n df = df[df['Sales'] > q1 - k*iqr]\n df = df[df['Sales'] < q3 + k*iqr]\n return df\n\ndef rm_outliers_std(df): \n std = df['Sales'].std()\n mean = df['Sales'].mean()\n k = 3\n df = df[df['Sales'] > mean - k*std]\n df = df[df['Sales'] < mean + k*std]\n return df\n\n#2、对时间的拆分\ndef data_processing(df):\n df['Date'] = pd.to_datetime(df['Date'])\n df['DayOfYear'] = df['Date'].apply(lambda x: x.dayofyear)\n df['WeekOfYear'] = df['Date'].apply(lambda x: x.week)\n df['Month'] = df['Date'].apply(lambda x: x.month)\n df['DayOfMonth'] = df['Date'].apply(lambda x: x.day)\n df['Year'] = df['Date'].apply(lambda x: x.year)\n return df\n\n#4、为每个日期添加过去一个季度,过去半年,过去一年,过去两年的这家店的平均日销售量\ndef store_sales_each_day(sale):\n \n def add_mean(store,sale,current_date,past_time):\n past_date = current_date - timedelta(days=past_time)\n mean_sale = sale[(sale['Date'] < current_date) & (sale['Date'] > past_date) & (sale['Store'] == store)]['Sales'].mean()\n return mean_sale\n \n sale['past_quater_mean_sale'] = sale.apply(lambda row: add_mean(row['Store'], sale, row['Date'], 90), axis=1)\n sale['past_year_mean_sale'] = sale.apply(lambda row: add_mean(row['Store'], sale, row['Date'], 183), axis=1)\n \n return sale\n\n#测试集调整\ndef store_sales_each_day_for_test(sale,test):\n \n def add_mean(store,sale,current_date,past_time):\n past_date = current_date - timedelta(days=past_time)\n mean_sale = sale[(sale['Date'] < current_date) & (sale['Date'] > past_date) & (sale['Store'] == store)]['Sales'].mean()\n return mean_sale\n \n test['past_quater_mean_sale'] = test.apply(lambda row: add_mean(row['Store'], sale, row['Date'], 90), axis=1)\n test['past_year_mean_sale'] = test.apply(lambda row: add_mean(row['Store'], sale, row['Date'], 183), axis=1)\n\n return test\n\n#3、为每家店添加销售量客流量相关的均值,执行顺序在对时间进行拆分后\ndef add_mean_for_store(sales,store_df=store) :\n mean_sales_promo = []\n mean_sales_no_promo = []\n mean_sales = []\n mean_sales_2013 = []\n mean_sales_2014 = []\n mean_sales_2015 = []\n mean_store_sales_1month = []\n mean_store_sales_2months = []\n mean_store_sales_3months = []\n mean_store_sales_6months = []\n \n \n mean_customers_promo = []\n mean_customers_no_promo = []\n mean_customers = []\n mean_customers_2013 = []\n mean_customers_2014 = []\n mean_customers_2015 = []\n mean_customers_1month = [] \n mean_customers_2months = [] \n mean_customers_3months = []\n mean_customers_6months = []\n \n \n for store in store_df['Store']:\n sale = sales[sales['Store']==store]\n \n # mean of sales\n mean_sales.append(sale['Sales'].mean())\n mean_sales_promo.append(sale[sale['Promo'] == 1]['Sales'].mean())\n mean_sales_no_promo.append(sale[sale['Promo'] == 0]['Sales'].mean())\n mean_sales_2013.append(sale[sale['Year'] == 2013]['Sales'].mean())\n mean_sales_2014.append(sale[sale['Year'] == 2014]['Sales'].mean())\n mean_sales_2015.append(sale[sale['Year'] == 2015]['Sales'].mean())\n mean_store_sales_1month.append(sale[(sale['Month'] == 7) & (sale['Year'] == 2015)]['Sales'].mean()) \n mean_store_sales_2months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 6) & (sale['Year'] == 2015)]['Sales'].mean())\n 
mean_store_sales_3months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 5) & (sale['Year'] == 2015)]['Sales'].mean()) \n mean_store_sales_6months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 2) & (sale['Year'] == 2015)]['Sales'].mean())\n \n # mean of customers\n mean_customers.append(sale['Customers'].mean())\n mean_customers_promo.append(sale[sale['Promo'] == 1]['Customers'].mean())\n mean_customers_no_promo.append(sale[sale['Promo'] == 0]['Customers'].mean())\n mean_customers_2013.append(sale[sale['Year'] == 2013]['Customers'].mean())\n mean_customers_2014.append(sale[sale['Year'] == 2014]['Customers'].mean())\n mean_customers_2015.append(sale[sale['Year'] == 2015]['Customers'].mean())\n mean_customers_1month.append(sale[(sale['Month'] == 7) & (sale['Year'] == 2015)]['Customers'].mean()) \n mean_customers_2months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 6) & (sale['Year'] == 2015)]['Customers'].mean())\n mean_customers_3months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 5) & (sale['Year'] == 2015)]['Customers'].mean()) \n mean_customers_6months.append(sale[(sale['Month'] <= 7) ^(sale['Month'] >= 2) & (sale['Year'] == 2015)]['Customers'].mean())\n \n store_df['mean_sales'] = mean_sales\n store_df['mean_sales_promo'] = mean_sales_promo\n store_df['mean_sales_no_promo'] = mean_sales_no_promo\n store_df['mean_sales_2013'] = mean_sales_2013\n store_df['mean_sales_2014'] = mean_sales_2014\n store_df['mean_sales_2015'] = mean_sales_2015\n store_df['mean_store_sales_1month'] = mean_store_sales_1month\n store_df['mean_store_sales_2months'] = mean_store_sales_2months\n store_df['mean_store_sales_3months'] = mean_store_sales_3months\n store_df['mean_store_sales_6months'] = mean_store_sales_6months\n \n store_df['mean_customers'] = mean_customers\n store_df['mean_customers_promo'] = mean_customers_promo\n store_df['mean_customers_no_promo'] = mean_customers_no_promo\n store_df['mean_customers_2013'] = mean_customers_2013\n store_df['mean_customers_2014'] = mean_customers_2014\n store_df['mean_customers_2015'] = mean_customers_2015\n store_df['mean_customers_1month'] = mean_customers_1month\n store_df['mean_customers_2months'] = mean_customers_2months\n store_df['mean_customers_3months'] = mean_customers_3months\n store_df['mean_customers_6months'] = mean_customers_6months\n \n return store_df\n\ndef drop_stores(data_test, data):\n\tstores = data_test['Store'].unique() \n\treturn data[data_test['Store'].isin(stores)]\n\n#合并销售和商店 \ndef merge_sale(sale_data, store_data):\n train = sale_data.join(store_data, on='Store', rsuffix='_')\n train = train.drop('Store_',axis=1)\n return train\n\n#添加其他特征\ndef extra_features(data):\n data['CompetitionOpen'] = 12 * (data['Year'] - data['CompetitionOpenSinceYear']) + (data['Month'] - data['CompetitionOpenSinceMonth'])\n data['PromoOpen'] = 12 * (data['Year'] - data['Promo2SinceYear'])+ (data['WeekOfYear'] - data['Promo2SinceWeek']) / 4.0\n data['PromoOpen'] = data['PromoOpen'].apply(lambda x: x if x > 0 else 0)\n data.loc[data.Promo2SinceYear == 0, 'PromoOpen'] = 0\n data = data.drop(['CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear'], axis=1) #删除特征\n \n mappings = {'0': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4,'Jan,Apr,Jul,Oct':1,'Feb,May,Aug,Nov':2,'Mar,Jun,Sept,Dec':3} \n data['StoreType'].replace(mappings, inplace=True) \n data['Assortment'].replace(mappings, inplace=True) \n data['StateHoliday'].replace(mappings, inplace=True)\n data['PromoInterval'].replace(mappings, inplace=True)\n data = 
data.drop(['Date'],axis= 1)\n return data\n\n#去除极大极小值\nprint('Moving outliers')\ntrain = rm_outliers(train)\n\n#转换年月日\nprint('Convert Time')\ntrain = data_processing(train)\ntest = data_processing(test)\n\n#给商店计算平均销售量\nstore = add_mean_for_store(train)\nstore = drop_stores(test, store)\n\nprint('add additional past_quater_mean_sale and past_year_mean_sale')\n#添加额外特征'past_quater_mean_sale'和'past_year_mean_sale',这段代码运行时间可能会过长\ntrain = store_sales_each_day(train)\ntest = store_sales_each_day_for_test(train,test)\n\nprint('Merging')\n#合并\ntrain = merge_sale(train, store)\ntest = merge_sale(test, store)\n\n\n#额外的特征\ntrain = extra_features(train)\ntest = extra_features(test)\n\nholidayofyear = sorted(train[train['StateHoliday'].isin([1,2,3,4])]['DayOfYear'].unique())\ndef day2holiday(df,holidayofyear):\n for holiday in holidayofyear:\n df['DaysToHoliday' + str(holiday)] = holiday - df['DayOfYear']\n return df\n\n#计算距离节日的日子数\ntrain = day2holiday(train,holidayofyear)\ntest = day2holiday(test,holidayofyear)\n\nprint('Final output')\n#生成最终输入\ntrain.to_csv('train_withextra.csv',index=False)\ntest.to_csv('test_withextra.csv',index=False)\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime",
"numpy.percentile"
]
] |
Vishal-Bhatia/dsmp-pre-work
|
[
"48f23464fd7775008999735d38321aadb13a0676"
] |
[
"Data-Science-Course-Prep---4/code.py"
] |
[
"# --------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# code starts here\n##Using read.csv() to load the data onto a dataframe variable\ndf = pd.read_csv(path)\n\n##Computing the probability that the FICO score is above 700\ndf1 = df[df.fico > 700]\np_a = len(df1)/len(df)\n\n##Computing the probability that the purpose of the loan is debt consolidation\ndf2 = df[df.purpose == 'debt_consolidation']\np_b = len(df2)/len(df)\n\n##Computing the probability that the purpose of the loan is debt consolidation given a FICO score of above 700\ndf3 = df1[df1.purpose == 'debt_consolidation']\np_a_b = len(df3)/len(df1)\n\n##Checking for independency\nresult = bool(p_a_b == p_b)\n\n##Outputting the independency result\nprint(result)\n\n# code ends here\n\n\n# --------------\n# code starts here\n##Computing the probability that the loan has been paid back\ndf4 = df[df[\"paid.back.loan\"] == \"Yes\"]\nprob_lp = len(df4)/len(df)\n\n##Computing the probability that the underwriting criteria of LendingClub.com are met\ndf5 = df[df[\"credit.policy\"] == \"Yes\"]\nprob_cs = len(df5)/len(df)\n\n##Subsetting a new database comprising only those entries where the loan has been paid back [recreating the first subset to meet GreyAtom checks on the code]\nnew_df = df[df[\"paid.back.loan\"] == \"Yes\"]\n\n##Computing the probability that the the loan has been paid back given the underwriting criteria of LendingClub.com are met\ndf6 = df4[df4[\"credit.policy\"] == \"Yes\"]\nprob_pd_cs = len(df6)/len(df4)\n\n##Applying Bayes theorem\nbayes = prob_pd_cs*prob_lp/prob_cs\n\n##Outputting the conditional probability\nprint(bayes)\n\n# code ends here\n\n\n# --------------\n# code starts here\n##Subsetting a new database comprising only those entries where the loan has not been paid back\ndf1 = df[df[\"paid.back.loan\"] == \"No\"]\n\n##Using value_counts() on purpose in the subsetted dataframe, and outputting the same\npurpose_vc = df1[\"purpose\"].value_counts()\n\n##Plotting the bar chart for purpose\npurpose_vc.plot.bar()\n\n# code ends here\n\n\n# --------------\n# code starts here\n##Computing the median and mean for installment\ninst_median = df.installment.median()\ninst_mean = df.installment.mean()\n\n##Plotting the histogram for installment\ndf_inst = df.installment\nhist_inst = df_inst.plot.hist(bins = 30)\nprint(hist_inst)\n\n##Plotting the histogram for log of annual income\ndf_loginc = df[\"log.annual.inc\"]\nhist_loginc = df_loginc.plot.hist(bins = 30)\nprint(hist_loginc)\n\n# code ends here\n\n\n"
] |
[
[
"pandas.read_csv"
]
] |
scaramagus/synthingie
|
[
"efbc11e199e9489f3c8d979b576ef9415cf70a1c"
] |
[
"tests/test_core.py"
] |
[
"import os\n\nimport numpy as np\nimport pytest\n\nfrom synthingie.core import Module, Audio\n\n\nSAMPLERATE = 48000\nFRAMESIZE = 1024\n\n\[email protected]\ndef module():\n return Module(SAMPLERATE, FRAMESIZE)\n\n\ndef test_render(module):\n zero = module.value(0)\n duration = 10.05\n audio = module.render(zero, duration)\n assert audio.samples.shape[0] == int(SAMPLERATE * duration)\n assert np.all(audio.samples == 0)\n\n\ndef test_audio(tmp_path):\n fname = os.path.join(tmp_path, \"filename.WAV\")\n audio = Audio(SAMPLERATE, np.zeros([SAMPLERATE]))\n audio.save(fname)\n audio2 = Audio.load(fname)\n assert audio.samplerate == audio2.samplerate\n assert all(audio.samples.astype(np.float32) == audio2.samples)\n"
] |
[
[
"numpy.all",
"numpy.zeros"
]
] |
mhyzy155/02456DeepLearningObjDet
|
[
"efd49590dad6c60654ceb532ee0b10d1d5093db2"
] |
[
"ColaBeerDataset.py"
] |
[
"import os\r\nimport numpy as np\r\nimport torch\r\nimport xml.etree.ElementTree as ET\r\nfrom PIL import Image\r\n\r\n\r\nclass ColaBeerDataset(torch.utils.data.Dataset):\r\n def __init__(self, root, transforms=None):\r\n self.root = root\r\n self.transforms = transforms\r\n # load all image files, sorting them to\r\n # ensure that they are aligned\r\n self.imgs = list(sorted(os.listdir(os.path.join(root, \"images\"))))\r\n self.xmls = list(sorted(os.listdir(os.path.join(root, \"frames\"))))\r\n\r\n def __getitem__(self, idx):\r\n # load images and xmls\r\n img_path = os.path.join(self.root, \"images\", self.imgs[idx])\r\n xml_path = os.path.join(self.root, \"frames\", self.xmls[idx])\r\n img = Image.open(img_path).convert(\"RGB\")\r\n\r\n \r\n #Get boundary boxes and labels\r\n target = self.__xml_to_dict(xml_path)\r\n image_id = torch.tensor([idx])\r\n #Add image_id to target dictionary\r\n target[\"image_id\"] = image_id\r\n\r\n if self.transforms is not None:\r\n img, target = self.transforms(img, target)\r\n\r\n return img, target\r\n\r\n def __xml_to_dict(self, xml_path):\r\n boxes = []\r\n\r\n #Parsing XML\r\n if os.path.exists(xml_path):\r\n tree = ET.parse(xml_path)\r\n root = tree.getroot()\r\n #Reading boundary boxes and labels from xml\r\n labels = []\r\n for obj in root.findall('object'):\r\n bbx = obj.find('bndbox')\r\n xmin = float(bbx.find('xmin').text)\r\n xmax = float(bbx.find('xmax').text)\r\n ymin = float(bbx.find('ymin').text)\r\n ymax = float(bbx.find('ymax').text)\r\n boxes.append([xmin, ymin, xmax, ymax])\r\n label = obj.findtext('name')\r\n #Converting label to integer\r\n if label == \"beer\":\r\n label = 1\r\n else:\r\n label = 2\r\n labels.append(label)\r\n\r\n #Converting everything to tensor\r\n num_objs = len(boxes)\r\n if num_objs == 0:\r\n boxes = torch.zeros((0, 4), dtype=torch.float32)\r\n area = torch.as_tensor(0, dtype=torch.float32)\r\n iscrowd = torch.zeros(0, dtype=torch.int64)\r\n else:\r\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\r\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\r\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\r\n \r\n labels = torch.as_tensor(labels, dtype=torch.int64)\r\n\r\n #Combining boxes and labels to dictionary\r\n target = {}\r\n target['boxes'] = boxes\r\n target['labels'] = labels\r\n target[\"area\"] = area\r\n target[\"iscrowd\"] = iscrowd\r\n return target\r\n\r\n def __len__(self):\r\n return len(self.imgs)\r\n"
] |
[
[
"torch.as_tensor",
"torch.zeros",
"torch.tensor"
]
] |
zhu-edward/DGSQP
|
[
"b907d961f292695e4cddbc1266313e0e1030c4f0"
] |
[
"DGSQP/solvers/ALGAMES.py"
] |
[
"#!/usr/bin python3\n\nimport numpy as np\nimport scipy as sp\nimport casadi as ca\n\nimport pathlib\nimport os\nimport copy\nimport shutil\nimport pdb\nfrom datetime import datetime\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom typing import List, Dict\n\nfrom DGSQP.types import VehicleState, VehiclePrediction\n\nfrom DGSQP.dynamics.dynamics_models import CasadiDecoupledMultiAgentDynamicsModel\n\nfrom DGSQP.solvers.abstract_solver import AbstractSolver\nfrom DGSQP.solvers.solver_types import ALGAMESParams\n\nclass ALGAMES(AbstractSolver):\n def __init__(self, joint_dynamics: CasadiDecoupledMultiAgentDynamicsModel, \n costs: List[Dict[str, ca.Function]], \n constraints: List[ca.Function], \n bounds: Dict[str, VehicleState],\n params=ALGAMESParams()):\n self.joint_dynamics = joint_dynamics\n self.M = self.joint_dynamics.n_a\n\n self.N = params.N\n\n self.outer_iters = params.outer_iters\n self.line_search_iters = params.line_search_iters\n self.newton_iters = params.newton_iters\n\n self.verbose = params.verbose\n self.code_gen = params.code_gen\n self.jit = params.jit\n self.opt_flag = params.opt_flag\n self.solver_name = params.solver_name\n if params.solver_dir is not None:\n self.solver_dir = os.path.join(params.solver_dir, self.solver_name)\n\n if not params.enable_jacobians:\n jac_opts = dict(enable_fd=False, enable_jacobian=False, enable_forward=False, enable_reverse=False)\n else:\n jac_opts = dict()\n\n if self.code_gen:\n if self.jit:\n self.options = dict(jit=True, jit_name=self.solver_name, compiler='shell', jit_options=dict(compiler='gcc', flags=['-%s' % self.opt_flag], verbose=self.verbose), **jac_opts)\n else:\n self.options = dict(jit=False, **jac_opts)\n self.c_file_name = self.solver_name + '.c'\n self.so_file_name = self.solver_name + '.so'\n if params.solver_dir is not None:\n self.solver_dir = pathlib.Path(params.solver_dir).expanduser().joinpath(self.solver_name)\n else:\n self.options = dict(jit=False, **jac_opts)\n\n # The costs should be a dict of casadi functions with keys 'stage' and 'terminal'\n if len(costs) != self.M:\n raise ValueError('Number of agents: %i, but only %i cost functions were provided' % (self.M, len(costs)))\n self.costs_sym = costs\n\n # The constraints should be a list (of length N+1) of casadi functions such that constraints[i] <= 0\n if len(constraints) != self.N+1:\n raise ValueError('Horizon length: %i, but only %i constraint functions were provide' % (self.N+1, len(constraints)))\n self.constraints_sym = constraints\n # Process box constraints\n self.state_ub, self.input_ub = self.joint_dynamics.state2qu(bounds['ub'])\n self.state_lb, self.input_lb = self.joint_dynamics.state2qu(bounds['lb'])\n self.state_ub_idxs = np.where(self.state_ub < np.inf)[0]\n self.state_lb_idxs = np.where(self.state_lb > -np.inf)[0]\n self.input_ub_idxs = np.where(self.input_ub < np.inf)[0]\n self.input_lb_idxs = np.where(self.input_lb > -np.inf)[0]\n\n self.n_c = 0\n # for k in range(self.N+1):\n # self.n_c += self.constraints_sym[k].size1_out(0) # Number of constraints\n\n self.state_input_predictions = [VehiclePrediction() for _ in range(self.M)]\n\n self.n_u = self.joint_dynamics.n_u\n self.n_q = self.joint_dynamics.n_q\n\n self.newton_step_tol = params.newton_step_tol\n # Convergence tolerance for Newton's method\n self.ineq_tol = params.ineq_tol\n self.eq_tol = params.eq_tol\n self.opt_tol = params.opt_tol\n self.rel_tol_req = 5\n\n # Lagrangian Regularization\n self.rho_init = params.rho\n self.gamma = params.gamma\n self.rho_val = 
copy.copy(self.rho_init)\n self.rho_max = params.rho_max\n self.lam_max = params.lam_max\n\n # Jacobian regularization\n self.q_reg_init = params.q_reg\n self.u_reg_init = params.u_reg\n\n # Line search parameters\n self.beta = params.beta\n self.tau = params.tau\n self.line_search_tol = params.line_search_tol\n\n self.debug_plot = params.debug_plot\n self.pause_on_plot = params.pause_on_plot\n self.local_pos = params.local_pos\n if self.debug_plot:\n matplotlib.use('TkAgg')\n plt.ion()\n self.fig = plt.figure(figsize=(10,5))\n self.ax_xy = self.fig.add_subplot(1,2,1)\n self.ax_a = self.fig.add_subplot(2,2,2)\n self.ax_s = self.fig.add_subplot(2,2,4)\n # self.joint_dynamics.dynamics_models[0].track.remove_phase_out()\n self.joint_dynamics.dynamics_models[0].track.plot_map(self.ax_xy, close_loop=False)\n self.colors = ['b', 'g', 'r', 'm', 'c']\n self.l_xy, self.l_a, self.l_s = [], [], []\n for i in range(self.M):\n self.l_xy.append(self.ax_xy.plot([], [], f'{self.colors[i]}o')[0])\n self.l_a.append(self.ax_a.plot([], [], f'-{self.colors[i]}o')[0])\n self.l_s.append(self.ax_s.plot([], [], f'-{self.colors[i]}o')[0])\n self.ax_a.set_ylabel('accel')\n self.ax_s.set_ylabel('steering')\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n \n self.q_pred = np.zeros((self.N+1, self.n_q))\n self.u_pred = np.zeros((self.N, self.n_u))\n\n self.q_ws = None\n self.u_ws = None\n self.l_ws = None\n self.m_ws = None\n\n self.debug = False\n\n self.u_prev = np.zeros(self.n_u)\n\n if params.solver_dir:\n self._load_solver()\n else:\n self._build_solver()\n\n self.initialized = True\n\n def initialize(self):\n pass\n\n def set_warm_start(self, q_ws: np.ndarray, u_ws: np.ndarray, l_ws: np.ndarray = None, m_ws: np.ndarray = None):\n if q_ws.shape[0] != self.N+1 or q_ws.shape[1] != self.n_q:\n raise(RuntimeError('Warm start state sequence of shape (%i,%i) is incompatible with required shape (%i,%i)' % (q_ws.shape[0],q_ws.shape[1],self.N+1,self.n_q)))\n if u_ws.shape[0] != self.N or u_ws.shape[1] != self.n_u:\n raise(RuntimeError('Warm start state sequence of shape (%i,%i) is incompatible with required shape (%i,%i)' % (u_ws.shape[0],u_ws.shape[1],self.N,self.n_u)))\n self.q_ws = q_ws\n self.u_ws = u_ws\n\n self.l_ws = l_ws\n self.m_ws = m_ws\n\n def step(self, states: List[VehicleState], env_state=None):\n info = self.solve(states)\n\n self.joint_dynamics.qu2state(states, None, self.u_pred[0])\n self.joint_dynamics.qu2prediction(self.state_input_predictions, self.q_pred, self.u_pred)\n for q in self.state_input_predictions:\n q.t = states[0].t\n\n self.u_prev = self.u_pred[0]\n\n q_ws = np.vstack((self.q_pred[1:], self.joint_dynamics.fd(self.q_pred[-1], self.u_pred[-1]).toarray().squeeze()))\n u_ws = np.vstack((self.u_pred[1:], self.u_pred[-1]))\n # self.set_warm_start(q_ws, u_ws, lam_bar.toarray(), mu_bar.toarray())\n self.set_warm_start(q_ws, u_ws)\n\n return info\n\n def solve(self, states: List[VehicleState]):\n solve_info = {}\n solve_start = datetime.now()\n self.u_prev = np.zeros(self.n_u)\n\n if self.q_ws is None or self.u_ws is None:\n # Rollout trajectory using input sequence from last solve\n q_bar = np.zeros((self.N+1, self.n_q))\n q_bar[0] = self.joint_dynamics.state2q(states)\n u_bar = np.vstack((self.u_pred[1:], self.u_pred[-1].reshape((1,-1)), self.u_prev.reshape((1,-1))))\n for k in range(self.N):\n # Update dynamics\n q_bar[k+1] = self.joint_dynamics.fd(q_bar[k], u_bar[k]).toarray().squeeze()\n else:\n q_bar = copy.copy(self.q_ws)\n u_bar = np.vstack((copy.copy(self.u_ws), 
self.u_prev.reshape((1,-1))))\n \n q_bar = ca.DM(q_bar.T)\n u_bar = ca.DM(u_bar.T)\n \n lam_bar = ca.DM.zeros(self.n_c)\n mu_bar = ca.DM.zeros((self.n_q*self.N, self.M))\n \n init = dict(q=copy.copy(q_bar), \n u=copy.copy(u_bar), \n l=copy.copy(lam_bar), \n m=copy.copy(mu_bar))\n\n if self.debug_plot:\n self._update_debug_plot(q_bar, u_bar)\n if self.pause_on_plot:\n pdb.set_trace()\n\n q_reg = copy.copy(self.q_reg_init)\n u_reg = copy.copy(self.u_reg_init)\n self.rho_val = copy.copy(self.rho_init)\n\n # Do ALGAMES\n converged = False\n rel_tol_its = 0\n iter_data = []\n print('ALGAMES')\n for i in range(self.outer_iters):\n it_start = datetime.now()\n if self.verbose:\n print('===================================================')\n print(f'ALGAMES iteration: {i}')\n \n u_im1 = copy.copy(u_bar)\n l_im1 = copy.copy(lam_bar)\n m_im1 = copy.copy(mu_bar)\n\n # Compute constraint violation for initial guess and construct inital regularization matrix\n C_bar = self.f_C(*ca.horzsplit(q_bar, 1), *ca.horzsplit(u_bar, 1))\n rho_bar = ca.DM([0 if c < 0 and l == 0 else self.rho_val for (c, l) in zip(ca.vertsplit(C_bar), ca.vertsplit(lam_bar))])\n # rho_bar = ca.DM([0 if c < -1e-7 and l < 1e-7 else self.rho_val for (c, l) in zip(ca.vertsplit(C_bar), ca.vertsplit(lam_bar))])\n\n # Newton's method w/ backtracking line search\n newton_converged = False\n for j in range(self.newton_iters):\n # Scheduled increase of regularization\n q_reg_it = q_reg*(j+1)**4 \n u_reg_it = u_reg*(j+1)**4\n # Compute search direction\n dq, du, dm, Gy = self.f_dy(*ca.horzsplit(q_bar, 1), \n *ca.horzsplit(u_bar, 1),\n *ca.horzsplit(mu_bar, 1),\n lam_bar,\n rho_bar,\n q_reg_it,\n u_reg_it)\n if ca.norm_inf(Gy) < self.opt_tol:\n if self.verbose:\n print(f' - Newton iteration: {j} | G norm: {np.linalg.norm(Gy, ord=np.inf):.4e} | converged: Gradient of Lagrangian within specified tolerance')\n newton_converged = True\n newton_status = 'stat_size'\n Gy_bar = ca.DM(Gy)\n break\n\n norm_Gy = np.linalg.norm(Gy, ord=1)/Gy.size1()\n\n # Do line search\n line_search_converged = False\n alpha = 1.0\n q_tmp = ca.DM(q_bar); u_tmp = ca.DM(u_bar); mu_tmp = ca.DM(mu_bar)\n for k in range(self.line_search_iters):\n q_trial = q_tmp + ca.horzcat(ca.DM.zeros((self.n_q, 1)), alpha*dq)\n u_trial = u_tmp + ca.horzcat(alpha*du, ca.DM.zeros((self.n_u, 1)))\n mu_trial = mu_tmp + alpha*dm\n Gy_trial = self.f_G_reg(*ca.horzsplit(q_trial, 1), \n *ca.horzsplit(u_trial, 1),\n *ca.horzsplit(mu_trial, 1),\n lam_bar,\n rho_bar,\n q_reg_it,\n u_reg_it,\n *ca.horzsplit(q_bar, 1), \n *ca.horzsplit(u_bar, 1))\n norm_Gy_trial = np.linalg.norm(Gy_trial, ord=1)/Gy_trial.size1()\n norm_Gy_thresh = (1-alpha*self.beta)*norm_Gy\n if self.verbose:\n print(f' - Line search iteration: {k} | LS G norm: {norm_Gy_trial:.4e} | G norm: {norm_Gy_thresh:.4e} | a: {alpha:.4e}')\n # if norm_Gy_trial-norm_Gy_thresh <= 1e-3:\n if norm_Gy_trial <= norm_Gy_thresh:\n line_search_converged = True\n break\n else:\n alpha *= self.tau\n q_bar = ca.DM(q_trial); u_bar = ca.DM(u_trial); mu_bar = ca.DM(mu_trial); Gy_bar = ca.DM(Gy_trial)\n if not line_search_converged:\n if self.verbose:\n print(' - Max line search iterations reached, did not converge')\n print(f' - Newton iteration: {j} | Line search did not converge')\n newton_converged = False\n newton_status = 'ls_fail'\n break\n\n # Compute average step size\n d = 0\n for k in range(self.N):\n d += (np.linalg.norm(dq[:,k], ord=1) + np.linalg.norm(du[:,k], ord=1))\n d *= (alpha/((self.n_q + self.n_u)*self.N))\n\n if self.debug:\n 
pdb.set_trace()\n\n # Check for convergence\n if d < self.newton_step_tol:\n if self.verbose:\n print(f' - Newton iteration: {j} | converged: Average step size within specified tolerance')\n newton_converged = True\n newton_status = 'step_size'\n break\n \n if self.verbose:\n print(f' - Newton iteration: {j} | G norm: {np.linalg.norm(Gy_bar, ord=np.inf):.4e} | step size: {d:.4e} | reg: {u_reg_it:.4e}')\n newton_solves = j + 1\n if newton_solves == self.newton_iters:\n newton_status = 'max_it'\n if self.verbose:\n print(f' - Newton iteration: {j} | Max Newton iterations reached, did not converge')\n\n # Compute constraint violation\n ineq_val, eq_val = self.f_CD(*ca.horzsplit(q_bar, 1), *ca.horzsplit(u_bar, 1))\n max_ineq_vio = np.linalg.norm(ca.fmax(ineq_val, ca.DM.zeros(self.n_c)), ord=np.inf)\n max_eq_vio = np.linalg.norm(eq_val, ord=np.inf)\n max_opt_vio = np.linalg.norm(self.f_opt(*ca.horzsplit(q_bar, 1), \n *ca.horzsplit(u_bar, 1),\n *ca.horzsplit(mu_bar, 1),\n lam_bar), ord=np.inf)\n comp = float(ca.dot(lam_bar, ineq_val))\n cond = {'p_feas': max(max_ineq_vio, max_eq_vio), 'd_feas': 0, 'comp': comp, 'stat': max_opt_vio}\n\n if self.verbose:\n print(f'ALGAMES iteration: {i} | ineq vio: {max_ineq_vio:.4e} | eq vio: {max_eq_vio:.4e} | comp vio: {comp:.4e} | opt vio: {max_opt_vio:.4e}')\n \n if max_ineq_vio < self.ineq_tol \\\n and max_eq_vio < self.eq_tol \\\n and comp < self.opt_tol \\\n and max_opt_vio < self.opt_tol:\n if self.verbose:\n print('ALGAMES iterations converged within specified tolerances')\n print('===================================================')\n it_dur = (datetime.now()-it_start).total_seconds()\n iter_data.append(dict(cond=cond,\n newton_solves=newton_solves,\n newton_converged=newton_converged,\n newton_status=newton_status,\n it_time=it_dur,\n u_sol=copy.copy(u_bar), \n l_sol=copy.copy(lam_bar), \n m_sol=copy.copy(mu_bar)))\n msg = 'conv_abs_tol'\n converged = True\n self.q_pred = copy.copy(q_bar.toarray().T)\n self.u_pred = copy.copy(u_bar[:,:-1].toarray().T)\n if self.debug_plot:\n self._update_debug_plot(q_bar, u_bar)\n if self.pause_on_plot:\n pdb.set_trace()\n break\n \n if np.linalg.norm(u_bar[:,:-1].toarray().ravel()-u_im1[:,:-1].toarray().ravel()) < self.opt_tol/2 \\\n and np.linalg.norm(lam_bar.toarray()-l_im1.toarray()) < self.opt_tol/2 \\\n and np.linalg.norm(mu_bar.toarray().ravel()-m_im1.toarray().ravel()) < self.opt_tol/2:\n rel_tol_its += 1\n if rel_tol_its >= self.rel_tol_req and max_ineq_vio < self.ineq_tol and max_eq_vio < self.eq_tol:\n it_dur = (datetime.now()-it_start).total_seconds()\n iter_data.append(dict(cond=cond,\n newton_solves=newton_solves,\n newton_converged=newton_converged,\n newton_status=newton_status,\n it_time=it_dur,\n u_sol=copy.copy(u_bar), \n l_sol=copy.copy(lam_bar), \n m_sol=copy.copy(mu_bar)))\n converged = True\n msg = 'conv_rel_tol'\n if self.verbose: print('ALGAMES iterations converged via relative tolerance')\n break\n else:\n rel_tol_its = 0\n\n if max_opt_vio > 1e5:\n it_dur = (datetime.now()-it_start).total_seconds()\n iter_data.append(dict(cond=cond,\n newton_solves=newton_solves,\n newton_converged=newton_converged,\n newton_status=newton_status,\n it_time=it_dur,\n u_sol=copy.copy(u_bar), \n l_sol=copy.copy(lam_bar), \n m_sol=copy.copy(mu_bar)))\n if self.verbose:\n print('ALGAMES diverged')\n print('===================================================')\n msg = 'diverged'\n converged = False\n break\n\n # Do dual ascent\n for k in range(self.n_c):\n lam_bar[k] = min(max(0, 
lam_bar[k]+rho_bar[k]*ineq_val[k]), self.lam_max) # Update ineq multipliers\n # Scheduled increase of rho\n self.rho_val = min(self.rho_max, self.gamma*self.rho_val)\n\n it_dur = (datetime.now()-it_start).total_seconds()\n if self.verbose:\n print(f'ALGAMES iteration time: {it_dur}')\n\n iter_data.append(dict(cond=cond,\n newton_solves=newton_solves,\n newton_converged=newton_converged,\n newton_status=newton_status,\n it_time=it_dur,\n u_sol=copy.copy(u_bar), \n l_sol=copy.copy(lam_bar), \n m_sol=copy.copy(mu_bar)))\n\n if self.debug:\n pdb.set_trace()\n if self.debug_plot:\n self._update_debug_plot(q_bar, u_bar)\n if self.pause_on_plot:\n pdb.set_trace()\n\n if not converged and i == self.outer_iters-1:\n if self.verbose:\n # print('Max ALGAMES iterations reached, did not converge, using best solution from iter %i' % self.best_iter)\n print('Max ALGAMES iterations reached, did not converge')\n print('===================================================')\n msg = 'max_it'\n self.q_pred = copy.copy(q_bar.toarray().T)\n self.u_pred = copy.copy(u_bar[:,:-1].toarray().T)\n\n solve_dur = (datetime.now()-solve_start).total_seconds()\n print(f'Solve status: {msg}')\n print(f'Solve iters: {i+1}')\n print(f'Solve time: {solve_dur}')\n J = self.f_J(*ca.horzsplit(q_bar, 1), *ca.horzsplit(u_bar, 1))\n print('Cost: ' + str(J))\n\n solve_info['time'] = solve_dur\n solve_info['num_iters'] = i+1\n solve_info['status'] = converged\n solve_info['cost'] = J\n solve_info['cond'] = cond\n solve_info['iter_data'] = iter_data\n solve_info['msg'] = msg\n solve_info['init'] = init\n\n if self.debug_plot:\n plt.ioff()\n\n return solve_info\n\n def _build_solver(self):\n # =================================\n # Create Lagrangian\n # =================================\n # Placeholder symbolic variables\n q_ph = [ca.MX.sym('q_ph_%i' % k, self.n_q) for k in range(self.N+1)] # Joint state\n ui_ph = [[ca.MX.sym('u_%i_ph_%i' % (i, k), self.joint_dynamics.dynamics_models[i].n_u) for k in range(self.N+1)] for i in range(self.M)] # Agent input\n u_ph = [ca.vertcat(*[ui_ph[i][k] for i in range(self.M)]) for k in range(self.N+1)]\n m_ph = [ca.MX.sym('m_ph_%i' % i, self.n_q*self.N) for i in range(self.M)] # Kinodynamic eq constraint multipliers\n q_ref_ph = [ca.MX.sym('q_ref_ph_%i' % k, self.n_q) for k in range(self.N+1)] # Joint state\n ui_ref_ph = [[ca.MX.sym('u_%i_ref_ph_%i' % (i, k), self.joint_dynamics.dynamics_models[i].n_u) for k in range(self.N+1)] for i in range(self.M)] # Agent input\n u_ref_ph = [ca.vertcat(*[ui_ref_ph[i][k] for i in range(self.M)]) for k in range(self.N+1)]\n\n # Cost over the horizon\n J = [ca.DM.zeros(1) for i in range(self.M)]\n for i in range(self.M):\n for k in range(self.N):\n J[i] += self.costs_sym[i][k](q_ph[k], ui_ph[i][k], ui_ph[i][k-1])\n J[i] += self.costs_sym[i][-1](q_ph[-1])\n self.f_J = ca.Function('J', q_ph + u_ph, J)\n \n Dq_J = [ca.jacobian(J[a], ca.vertcat(*q_ph)).T for a in range(self.M)]\n Du_J = [ca.jacobian(J[a], ca.vertcat(*ui_ph[a])).T for a in range(self.M)]\n self.f_Dq_J = ca.Function(f'f_Dq_J', q_ph + u_ph, Dq_J)\n self.f_Du_J = ca.Function(f'f_Du_J', q_ph + u_ph, Du_J)\n\n # Residual of kinodynamic constraints\n D = []\n for k in range(self.N):\n D.append(q_ph[k+1] - self.joint_dynamics.fd(q_ph[k], u_ph[k]))\n # D.append(self.joint_dynamics.fd(q_ph[k], u_ph[k]) - q_ph[k+1])\n D = ca.vertcat(*D)\n self.f_D = ca.Function('D', q_ph + u_ph, [D])\n\n Dq_D = [ca.jacobian(D, ca.vertcat(*q_ph))]\n Du_D = [ca.jacobian(D, ca.vertcat(*ui_ph[a])) for a in range(self.M)]\n self.f_Dq_D 
= ca.Function('f_Dq_D', q_ph + u_ph, Dq_D)\n self.f_Du_D = ca.Function('f_Du_D', q_ph + u_ph, Du_D)\n\n # Residual of inequality constraints\n C = []\n for k in range(self.N):\n if self.constraints_sym[k] is not None:\n C.append(self.constraints_sym[k](q_ph[k], u_ph[k], u_ph[k-1]))\n # Add box constraints\n if len(self.input_ub_idxs) > 0:\n C.append(u_ph[k][self.input_ub_idxs] - self.input_ub[self.input_ub_idxs])\n if len(self.input_lb_idxs) > 0:\n C.append(self.input_lb[self.input_lb_idxs] - u_ph[k][self.input_lb_idxs])\n if len(self.state_ub_idxs) > 0:\n C.append(q_ph[k][self.state_ub_idxs] - self.state_ub[self.state_ub_idxs])\n if len(self.state_lb_idxs) > 0:\n C.append(self.state_lb[self.state_lb_idxs] - q_ph[k][self.state_lb_idxs])\n if self.constraints_sym[-1] is not None:\n C.append(self.constraints_sym[-1](q_ph[-1]))\n # Add box constraints\n if len(self.state_ub_idxs) > 0:\n C.append(q_ph[-1][self.state_ub_idxs] - self.state_ub[self.state_ub_idxs])\n if len(self.state_lb_idxs) > 0:\n C.append(self.state_lb[self.state_lb_idxs] - q_ph[-1][self.state_lb_idxs])\n C = ca.vertcat(*C)\n self.n_c = C.shape[0]\n self.f_C = ca.Function('C', q_ph + u_ph, [C])\n self.f_CD = ca.Function('CD', q_ph + u_ph, [C, D])\n\n Dq_C = [ca.jacobian(C, ca.vertcat(*q_ph))]\n Du_C = [ca.jacobian(C, ca.vertcat(*ui_ph[a])) for a in range(self.M)]\n self.f_Dq_C = ca.Function('f_Dq_C', q_ph + u_ph, Dq_C)\n self.f_Du_C = ca.Function('f_Du_C', q_ph + u_ph, Du_C)\n\n l_ph = ca.MX.sym('l_ph', self.n_c) # Ineq constraint multipliers\n jac_reg_q_ph = ca.MX.sym('jac_reg_q_ph', 1)\n jac_reg_u_ph = ca.MX.sym('jac_reg_u_ph', 1)\n reg_ph = ca.MX.sym('reg_ph', self.n_c)\n \n Lr = []\n for i in range(self.M):\n Lr.append(J[i] + ca.dot(m_ph[i], D) + ca.dot(l_ph, C))\n opt = []\n for i in range(self.M):\n opt_qi, opt_ui = [], []\n for k in range(self.N):\n opt_qi.append(ca.jacobian(Lr[i], q_ph[k+1]).T)\n opt_ui.append(ca.jacobian(Lr[i], ui_ph[i][k]).T)\n # pdb.set_trace()\n opt.append(ca.vertcat(*opt_qi, *opt_ui))\n opt = ca.vertcat(*opt)\n self.f_opt = ca.Function('opt', q_ph + u_ph + m_ph + [l_ph], [opt])\n\n\n L = []\n for i in range(self.M):\n L.append(J[i] + ca.dot(m_ph[i], D) + ca.dot(l_ph, C) + ca.bilin(ca.diag(reg_ph), C, C)/2)\n\n # Gradient of agent Lagrangian w.r.t. joint state and agent input\n G = []\n for i in range(self.M):\n G_qi, G_ui = [], []\n for k in range(self.N):\n G_qi.append(ca.jacobian(L[i], q_ph[k+1]).T)\n G_ui.append(ca.jacobian(L[i], ui_ph[i][k]).T)\n # pdb.set_trace()\n G.append(ca.vertcat(*G_qi, *G_ui))\n G = ca.vertcat(*G, D)\n self.f_G = ca.Function('G', q_ph + u_ph + m_ph + [l_ph, reg_ph], [G])\n \n # Regularized gradient\n G_reg = []\n for i in range(self.M):\n G_qi, G_ui = [], []\n for k in range(self.N):\n G_qi.append(ca.jacobian(L[i], q_ph[k+1]).T + jac_reg_q_ph*(q_ph[k+1]-q_ref_ph[k+1]))\n G_ui.append(ca.jacobian(L[i], ui_ph[i][k]).T + jac_reg_u_ph*(ui_ph[i][k]-ui_ref_ph[i][k]))\n G_reg.append(ca.vertcat(*G_qi, *G_ui))\n G_reg = ca.vertcat(*G_reg, D)\n self.f_G_reg = ca.Function('G_reg', q_ph + u_ph + m_ph + [l_ph, reg_ph, jac_reg_q_ph, jac_reg_u_ph] + q_ref_ph + u_ref_ph, [G_reg])\n\n # Gradient of G w.r.t. 
state trajectory (not including initial state), input sequence, and eq constraint multipliers\n y = ca.vertcat(*q_ph[1:], *u_ph[:-1], *m_ph)\n H = ca.jacobian(G, y)\n reg = ca.vertcat(jac_reg_q_ph*ca.DM.ones(self.n_q*self.N), jac_reg_u_ph*ca.DM.ones(self.n_u*self.N), ca.DM.zeros(self.n_q*self.N*self.M))\n H_reg = H + ca.diag(reg)\n self.f_H = ca.Function('H', q_ph + u_ph + m_ph + [l_ph, reg_ph, jac_reg_q_ph, jac_reg_u_ph], [H_reg])\n \n # Search direction\n dy = -ca.solve(H_reg, G, 'lapacklu')\n # dy = -ca.solve(H_reg, G)\n\n dq = ca.reshape(dy[:self.n_q*self.N], (self.n_q, self.N))\n du = ca.reshape(dy[self.n_q*self.N:self.n_q*self.N+self.n_u*self.N], (self.n_u, self.N))\n dm = ca.reshape(dy[self.n_q*self.N+self.n_u*self.N:], (self.n_q*self.N, self.M))\n self.f_dy = ca.Function('dy', q_ph + u_ph + m_ph + [l_ph, reg_ph, jac_reg_q_ph, jac_reg_u_ph], [dq, du, dm, G])\n\n if self.code_gen and not self.jit:\n generator = ca.CodeGenerator(self.c_file_name)\n generator.add(self.f_dy)\n generator.add(self.f_J)\n generator.add(self.f_G)\n generator.add(self.f_C)\n generator.add(self.f_CD)\n\n # Set up paths\n cur_dir = pathlib.Path.cwd()\n gen_path = cur_dir.joinpath(self.solver_name)\n c_path = gen_path.joinpath(self.c_file_name)\n if gen_path.exists():\n shutil.rmtree(gen_path)\n gen_path.mkdir(parents=True)\n\n os.chdir(gen_path)\n if self.verbose:\n print('- Generating C code for solver %s at %s' % (self.solver_name, str(gen_path)))\n generator.generate()\n pdb.set_trace()\n # Compile into shared object\n so_path = gen_path.joinpath(self.so_file_name)\n if self.verbose:\n print('- Compiling shared object %s from %s' % (so_path, c_path))\n print('- Executing \"gcc -fPIC -shared -%s %s -o %s\"' % (self.opt_flag, c_path, so_path))\n os.system('gcc -fPIC -shared -%s %s -o %s' % (self.opt_flag, c_path, so_path))\n\n # Swtich back to working directory\n os.chdir(cur_dir)\n install_dir = self.install()\n\n # Load solver\n self._load_solver(install_dir.joinpath(self.so_file_name))\n\n def _load_solver(self, solver_path=None):\n if solver_path is None:\n solver_path = pathlib.Path(self.solver_dir, self.so_file_name).expanduser()\n if self.verbose:\n print('- Loading solver from %s' % str(solver_path))\n self.f_dy = ca.external('dy', str(solver_path))\n self.f_G = ca.external('G', str(solver_path))\n self.f_C = ca.external('C', str(solver_path))\n self.f_J = ca.external('J', str(solver_path))\n self.f_CD = ca.external('CD', str(solver_path))\n\n def get_prediction(self) -> List[VehiclePrediction]:\n return self.state_input_predictions\n \n def _update_debug_plot(self, q_nom, u_nom):\n if not self.local_pos:\n for i in range(self.M):\n self.l_xy[i].set_data(q_nom.toarray()[0+int(np.sum(self.num_qa_d[:i])),:], q_nom.toarray()[1+int(np.sum(self.num_qa_d[:i])),:])\n else:\n raise NotImplementedError('Conversion from local to global pos has not been implemented for debug plot')\n self.ax_xy.set_aspect('equal')\n J = self.f_J(*ca.horzsplit(q_nom, 1), *ca.horzsplit(u_nom, 1))\n self.ax_xy.set_title(str(J))\n for i in range(self.M):\n self.l_a[i].set_data(np.arange(self.N), u_nom.toarray()[0+int(np.sum(self.num_ua_d[:i])),:-1])\n self.l_s[i].set_data(np.arange(self.N), u_nom.toarray()[1+int(np.sum(self.num_ua_d[:i])),:-1])\n self.ax_a.relim()\n self.ax_a.autoscale_view()\n self.ax_s.relim()\n self.ax_s.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\nif __name__ == '__main__':\n pass\n"
] |
[
[
"numpy.sum",
"matplotlib.use",
"numpy.arange",
"numpy.linalg.norm",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.ion",
"numpy.zeros",
"numpy.where",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] |
Angus1996/HuaweiCloud_AI_Competition2019
|
[
"08f4a262a7563bc26561acae8bfe3b41aab9af6b"
] |
[
"using_pretrainedmodels_package/SENet154/src/prepare_data.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n数据集准备脚本\n\"\"\"\nimport os\nimport codecs\nimport shutil\ntry:\n import moxing as mox\nexcept:\n print('not use moxing')\nfrom glob import glob\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\ndef prepare_data_on_modelarts(args):\n \"\"\"\n 如果数据集存储在OBS,则需要将OBS上的数据拷贝到 ModelArts 中\n \"\"\"\n # Create some local cache directories used for transfer data between local path and OBS path\n if not args.data_url.startswith('s3://'):\n args.data_local = args.data_url\n else:\n args.data_local = os.path.join(args.local_data_root, 'train_val')\n if not os.path.exists(args.data_local):\n mox.file.copy_parallel(args.data_url, args.data_local)\n else:\n print('args.data_local: %s is already exist, skip copy' % args.data_local)\n\n if not args.train_url.startswith('s3://'):\n args.train_local = args.train_url\n else:\n args.train_local = os.path.join(args.local_data_root, 'model_snapshots')\n if not os.path.exists(args.train_local):\n os.mkdir(args.train_local)\n\n if not args.test_data_url.startswith('s3://'):\n args.test_data_local = args.test_data_url\n else:\n args.test_data_local = os.path.join(args.local_data_root, 'test_data/')\n if not os.path.exists(args.test_data_local):\n mox.file.copy_parallel(args.test_data_url, args.test_data_local)\n else:\n print('args.test_data_local: %s is already exist, skip copy' % args.test_data_local)\n\n args.tmp = os.path.join(args.local_data_root, 'tmp')\n if not os.path.exists(args.tmp):\n os.mkdir(args.tmp)\n\n return args\n\n\ndef split_train_val(input_dir, output_train_dir, output_val_dir):\n \"\"\"\n 大赛发布的公开数据集是所有图片和标签txt都在一个目录中的格式\n 如果需要使用 torch.utils.data.DataLoader 来加载数据,则需要将数据的存储格式做如下改变:\n 1)划分训练集和验证集,分别存放为 train 和 val 目录;\n 2)train 和 val 目录下有按类别存放的子目录,子目录中都是同一个类的图片\n 本函数就是实现如上功能,建议先在自己的机器上运行本函数,然后将处理好的数据上传到OBS\n \"\"\"\n if not os.path.exists(input_dir):\n print(input_dir, 'is not exist')\n return\n\n # 1. 检查图片和标签的一一对应\n label_file_paths = glob(os.path.join(input_dir, '*.txt'))\n valid_img_names = []\n valid_labels = []\n for file_path in label_file_paths:\n with codecs.open(file_path, 'r', 'utf-8') as f:\n line = f.readline()\n line_split = line.strip().split(', ')\n img_name = line_split[0]\n label_id = line_split[1]\n if os.path.exists(os.path.join(input_dir, img_name)):\n valid_img_names.append(img_name)\n valid_labels.append(int(label_id))\n else:\n print('error', img_name, 'is not exist')\n\n # 2. 
使用 StratifiedShuffleSplit 划分训练集和验证集,可保证划分后各类别的占比保持一致\n # TODO,数据集划分方式可根据您的需要自行调整\n sss = StratifiedShuffleSplit(n_splits=1, test_size=500, random_state=0)\n sps = sss.split(valid_img_names, valid_labels)\n for sp in sps:\n train_index, val_index = sp\n\n label_id_name_dict = \\\n {\n \"0\": \"工艺品/仿唐三彩\",\n \"1\": \"工艺品/仿宋木叶盏\",\n \"2\": \"工艺品/布贴绣\",\n \"3\": \"工艺品/景泰蓝\",\n \"4\": \"工艺品/木马勺脸谱\",\n \"5\": \"工艺品/柳编\",\n \"6\": \"工艺品/葡萄花鸟纹银香囊\",\n \"7\": \"工艺品/西安剪纸\",\n \"8\": \"工艺品/陕历博唐妞系列\",\n \"9\": \"景点/关中书院\",\n \"10\": \"景点/兵马俑\",\n \"11\": \"景点/南五台\",\n \"12\": \"景点/大兴善寺\",\n \"13\": \"景点/大观楼\",\n \"14\": \"景点/大雁塔\",\n \"15\": \"景点/小雁塔\",\n \"16\": \"景点/未央宫城墙遗址\",\n \"17\": \"景点/水陆庵壁塑\",\n \"18\": \"景点/汉长安城遗址\",\n \"19\": \"景点/西安城墙\",\n \"20\": \"景点/钟楼\",\n \"21\": \"景点/长安华严寺\",\n \"22\": \"景点/阿房宫遗址\",\n \"23\": \"民俗/唢呐\",\n \"24\": \"民俗/皮影\",\n \"25\": \"特产/临潼火晶柿子\",\n \"26\": \"特产/山茱萸\",\n \"27\": \"特产/玉器\",\n \"28\": \"特产/阎良甜瓜\",\n \"29\": \"特产/陕北红小豆\",\n \"30\": \"特产/高陵冬枣\",\n \"31\": \"美食/八宝玫瑰镜糕\",\n \"32\": \"美食/凉皮\",\n \"33\": \"美食/凉鱼\",\n \"34\": \"美食/德懋恭水晶饼\",\n \"35\": \"美食/搅团\",\n \"36\": \"美食/枸杞炖银耳\",\n \"37\": \"美食/柿子饼\",\n \"38\": \"美食/浆水面\",\n \"39\": \"美食/灌汤包\",\n \"40\": \"美食/烧肘子\",\n \"41\": \"美食/石子饼\",\n \"42\": \"美食/神仙粉\",\n \"43\": \"美食/粉汤羊血\",\n \"44\": \"美食/羊肉泡馍\",\n \"45\": \"美食/肉夹馍\",\n \"46\": \"美食/荞面饸饹\",\n \"47\": \"美食/菠菜面\",\n \"48\": \"美食/蜂蜜凉粽子\",\n \"49\": \"美食/蜜饯张口酥饺\",\n \"50\": \"美食/西安油茶\",\n \"51\": \"美食/贵妃鸡翅\",\n \"52\": \"美食/醪糟\",\n \"53\": \"美食/金线油塔\"\n }\n\n # 3. 创建 output_train_dir 目录下的所有标签名子目录\n for id in label_id_name_dict.keys():\n if not os.path.exists(os.path.join(output_train_dir, id)):\n os.mkdir(os.path.join(output_train_dir, id))\n\n # 4. 将训练集图片拷贝到 output_train_dir 目录\n for index in train_index:\n file_path = label_file_paths[index]\n with codecs.open(file_path, 'r', 'utf-8') as f:\n gt_label = f.readline()\n img_name = gt_label.split(',')[0].strip()\n id = gt_label.split(',')[1].strip()\n shutil.copy(os.path.join(input_dir, img_name), os.path.join(output_train_dir, id, img_name))\n\n # 5. 创建 output_val_dir 目录下的所有标签名子目录\n for id in label_id_name_dict.keys():\n if not os.path.exists(os.path.join(output_val_dir, id)):\n os.mkdir(os.path.join(output_val_dir, id))\n\n # 6. 
将验证集图片拷贝到 output_val_dir 目录\n for index in val_index:\n file_path = label_file_paths[index]\n with codecs.open(file_path, 'r', 'utf-8') as f:\n gt_label = f.readline()\n img_name = gt_label.split(',')[0].strip()\n id = gt_label.split(',')[1].strip()\n shutil.copy(os.path.join(input_dir, img_name), os.path.join(output_val_dir, id, img_name))\n\n print('total samples: %d, train samples: %d, val samples:%d'\n % (len(valid_labels), len(train_index), len(val_index)))\n print('end')\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='data prepare')\n parser.add_argument('--input_dir', required=True, type=str, help='input data dir')\n parser.add_argument('--output_train_dir', required=True, type=str, help='output train data dir')\n parser.add_argument('--output_val_dir', required=True, type=str, help='output validation data dir')\n args = parser.parse_args()\n if args.input_dir == '' or args.output_train_dir == '' or args.output_val_dir == '':\n raise Exception('You must specify valid arguments')\n if not os.path.exists(args.output_train_dir):\n os.makedirs(args.output_train_dir)\n if not os.path.exists(args.output_val_dir):\n os.makedirs(args.output_val_dir)\n split_train_val(args.input_dir, args.output_train_dir, args.output_val_dir)\n"
] |
[
[
"sklearn.model_selection.StratifiedShuffleSplit"
]
] |
indigoLovee/TD3
|
[
"0e86a40c27ec376b52e9f8e0e70db28e7411276b"
] |
[
"networks.py"
] |
[
"import torch as T\nimport torch.nn as nn\nimport torch.optim as optim\n\ndevice = T.device(\"cuda:0\" if T.cuda.is_available() else \"cpu\")\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim):\n super(ActorNetwork, self).__init__()\n self.fc1 = nn.Linear(state_dim, fc1_dim)\n self.ln1 = nn.LayerNorm(fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.ln2 = nn.LayerNorm(fc2_dim)\n self.action = nn.Linear(fc2_dim, action_dim)\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.to(device)\n\n def forward(self, state):\n x = T.relu(self.ln1(self.fc1(state)))\n x = T.relu(self.ln2(self.fc2(x)))\n action = T.tanh(self.action(x))\n\n return action\n\n def save_checkpoint(self, checkpoint_file):\n T.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)\n\n def load_checkpoint(self, checkpoint_file):\n self.load_state_dict(T.load(checkpoint_file))\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, beta, state_dim, action_dim, fc1_dim, fc2_dim):\n super(CriticNetwork, self).__init__()\n self.fc1 = nn.Linear(state_dim+action_dim, fc1_dim)\n self.ln1 = nn.LayerNorm(fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.ln2 = nn.LayerNorm(fc2_dim)\n self.q = nn.Linear(fc2_dim, 1)\n\n self.optimizer = optim.Adam(self.parameters(), lr=beta)\n self.to(device)\n\n def forward(self, state, action):\n x = T.cat([state, action], dim=-1)\n x = T.relu(self.ln1(self.fc1(x)))\n x = T.relu(self.ln2(self.fc2(x)))\n q = self.q(x)\n\n return q\n\n def save_checkpoint(self, checkpoint_file):\n T.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)\n\n def load_checkpoint(self, checkpoint_file):\n self.load_state_dict(T.load(checkpoint_file))\n\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.cuda.is_available"
]
] |
Charlottez112/ovito-scripts
|
[
"3c677ccd6edbf7602c802f8fa67524c15b0cea0c"
] |
[
"freud/overlay_DiffractionPattern.py"
] |
[
"# Copyright (c) 2021 The Regents of the University of Michigan\n# All rights reserved.\n# This software is licensed under the BSD 3-Clause License.\n\nimport numpy as np\nimport PySide2.QtGui\nimport rowan\n\nimport freud\n\nprint(\"Diffraction, freud version\", freud.__version__)\n\n\ndef render(\n args, grid_size=256, output_size=256, draw_x: float = 10, draw_y: float = 10\n):\n pipeline = args.scene.selected_pipeline\n if not pipeline:\n return\n data = pipeline.compute(args.frame)\n view_orientation = rowan.from_matrix(args.viewport.viewMatrix[:, :3])\n dp = freud.diffraction.DiffractionPattern(\n grid_size=grid_size,\n output_size=output_size,\n )\n dp.compute(\n system=data,\n view_orientation=view_orientation,\n zoom=1,\n peak_width=1,\n )\n buf = dp.to_image(cmap=\"afmhot\", vmax=np.max(dp.diffraction))\n width, height, bytes_per_pixel = buf.shape\n img = PySide2.QtGui.QImage(\n buf,\n width,\n height,\n width * bytes_per_pixel,\n PySide2.QtGui.QImage.Format_RGBA8888,\n )\n # Paint QImage onto viewport canvas\n args.painter.drawImage(draw_x, draw_y, img)\n"
] |
[
[
"numpy.max"
]
] |
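The overlay script above runs inside an OVITO viewport; as a hedged standalone sketch, the same freud `DiffractionPattern` call can be fed a plain `(box, points)` system (the random points, box size, and seed below are invented):

```python
# Standalone freud DiffractionPattern sketch on a random point set (no OVITO required).
import numpy as np
import freud

rng = np.random.default_rng(0)
box = freud.box.Box.cube(10.0)
points = rng.uniform(-5.0, 5.0, size=(500, 3))         # coordinates inside the cubic box

dp = freud.diffraction.DiffractionPattern(grid_size=256, output_size=256)
dp.compute(system=(box, points), zoom=1, peak_width=1)
image = dp.to_image(cmap="afmhot", vmax=np.max(dp.diffraction))
print(image.shape)                                      # RGBA buffer, (256, 256, 4)
```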
mengbingrock/trt-samples-for-hackathon-cn
|
[
"bf0e6379c358a72ea96093ec41fa1b1b9275feaf"
] |
[
"python/app_onnx_resnet50.py"
] |
[
"#\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport torch\nimport torchvision\nfrom torchsummary import summary\nimport time\nimport pycuda.driver as cuda\nimport pycuda.autoinit\n\ntorch.manual_seed(0)\n\nresnet50 = torchvision.models.resnet50().cuda()\nresnet50.eval()\n#summary(resnet50, (3, 1080, 1920), device='cuda')\n\ninput_data = torch.randn(1, 3, 1080, 1920, dtype=torch.float32, device='cuda')\n\noutput_data_pytorch = resnet50(input_data).cpu().detach().numpy()\n\nnRound = 10\ntorch.cuda.synchronize()\nt0 = time.time()\nfor i in range(nRound):\n resnet50(input_data)\ntorch.cuda.synchronize()\ntime_pytorch = (time.time() - t0) / nRound\nprint('PyTorch time:', time_pytorch)\n\ninput_names = ['input']\noutput_names = ['output']\ntorch.onnx.export(resnet50, input_data, 'resnet50.onnx', input_names=input_names, output_names=output_names, verbose=False, opset_version=11)\ntorch.onnx.export(resnet50, input_data, 'resnet50.dynamic_shape.onnx', dynamic_axes={\"input\": [0, 2, 3]}, input_names=input_names, output_names=output_names, verbose=False, opset_version=11)\n\n#继续运行python代码前,先运行如下命令\n#trtexec --verbose --onnx=resnet50.onnx --saveEngine=resnet50.trt\n#trtexec --verbose --onnx=resnet50.onnx --saveEngine=resnet50_fp16.trt --fp16\n#以下命令不必运行,仅供参考\n#trtexec --verbose --onnx=resnet50.dynamic_shape.onnx --saveEngine=resnet50.dynamic_shape.trt --optShapes=input:1x3x1080x1920 --minShapes=input:1x3x1080x1920 --maxShapes=input:1x3x1080x1920\n\nfrom trt_lite import TrtLite\nimport numpy as np\nimport os\n\nclass PyTorchTensorHolder(pycuda.driver.PointerHolderBase):\n def __init__(self, tensor):\n super(PyTorchTensorHolder, self).__init__()\n self.tensor = tensor\n def get_pointer(self):\n return self.tensor.data_ptr()\n\nfor engine_file_path in ['resnet50.trt', 'resnet50_fp16.trt']:\n if not os.path.exists(engine_file_path):\n print('Engine file', engine_file_path, 'doesn\\'t exist. 
Please run trtexec and re-run this script.')\n exit(1)\n \n print('====', engine_file_path, '===')\n trt = TrtLite(engine_file_path=engine_file_path)\n trt.print_info()\n i2shape = {0: (1, 3, 1080, 1920)}\n io_info = trt.get_io_info(i2shape)\n d_buffers = trt.allocate_io_buffers(i2shape, True)\n output_data_trt = np.zeros(io_info[1][2], dtype=np.float32)\n\n #利用PyTorch和PyCUDA的interop,保留数据始终在显存上\n cuda.memcpy_dtod(d_buffers[0], PyTorchTensorHolder(input_data), input_data.nelement() * input_data.element_size())\n #下面一行的作用跟上一行一样,不过它是把数据拷到cpu再拷回gpu,效率低。作为注释留在这里供参考\n #cuda.memcpy_htod(d_buffers[0], input_data.cpu().detach().numpy())\n trt.execute(d_buffers, i2shape)\n cuda.memcpy_dtoh(output_data_trt, d_buffers[1])\n\n cuda.Context.synchronize()\n t0 = time.time()\n for i in range(nRound):\n trt.execute(d_buffers, i2shape)\n cuda.Context.synchronize()\n time_trt = (time.time() - t0) / nRound\n print('TensorRT time:', time_trt)\n\n print('Speedup:', time_pytorch / time_trt)\n print('Average diff percentage:', np.mean(np.abs(output_data_pytorch - output_data_trt) / np.abs(output_data_pytorch)))\n"
] |
[
[
"torch.onnx.export",
"torch.cuda.synchronize",
"numpy.abs",
"torch.manual_seed",
"torch.randn",
"numpy.zeros"
]
] |
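As a hedged follow-up to the export in the entry above, here is a quick structural check of the generated ONNX file (this assumes the separate `onnx` package is installed; the file name is taken from the script):

```python
# Optional sanity check of resnet50.onnx produced by torch.onnx.export above.
import onnx

model = onnx.load("resnet50.onnx")
onnx.checker.check_model(model)                 # raises if the graph is malformed
print([i.name for i in model.graph.input],      # expected: ['input']
      [o.name for o in model.graph.output])     # expected: ['output']
```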
shweta-29/Forbes_Companies_Sustainability
|
[
"3db3c2127b13c5e355a3c10b1ef3b8ba31fb64d5",
"3db3c2127b13c5e355a3c10b1ef3b8ba31fb64d5"
] |
[
"ESGmetrics/msci.py",
"ESGmetrics/csrhub.py"
] |
[
"\"\"\"\nMSCI website Scrape\n\nThis script allows the user to scrape the companies' ESG ratings from the MSCI\nwebsite. Website link:\n\"https://www.msci.com/our-solutions/esg-investing/esg-ratings/esg-ratings-corporate-search-tool/\"\n\nThis tool accepts Company's names list in comma separated value\nfile (.csv) format as input.\n\nThis script requires that `pandas` be installed within the Python\nenvironment you are running this script in.\n\nThe output is a .csv file with Company name and its corresponding ESG ratings\n\"\"\"\n\nimport pandas as pd\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nfrom tqdm import tqdm\nfrom .scraper import WebScraper\n\n\ndef _append_dict(temp: str) -> str:\n ''' Append the MSCI dictionary with Company Name and its MSCI ESG rating\n\n Parameters\n ----------\n temp : str\n The previous company name appended to the dictionary\n\n Returns\n -------\n str\n The latest company name appended to the dictionary\n '''\n if temp == company:\n bot.append_empty_values(msci)\n\n else:\n msci['MSCI_Company'].append(company.text)\n msci['MSCI_ESG'].append(esg_score.get_attribute('class'))\n temp = company\n return temp\n\n\n# Read Forbes dataset\ndf = pd.read_csv('Forbes.csv', index_col=0)\ndata_length = len(df)\nheader_name = 'Name'\n\n# Set up the webdriver\nURL = \"https://www.msci.com/our-solutions/esg-investing/esg-ratings/esg\\\n -ratings-corporate-search-tool/\"\nbot = WebScraper(URL)\n\n# Accept cookies on the website\ncookies_xpath = '//*[@id=\"portlet_mscicookiebar_WAR_mscicookiebar\"]/div/div[2]/ \\\n div/div/div[1]/div/button[1]'\nbot.accept_cookies(cookies_xpath)\n\n# Extract company names and their ESG score and store it in the dictionary\ntemp = 0\nfor i in tqdm(range(data_length)):\n msci = {'MSCI_Company': [], 'MSCI_ESG': []}\n # Starting the search by finding the search bar & searching for the company\n search_bar = bot.send_request_to_search_bar(\n header_name, df, i, xpath='//*[@id=\"_esgratingsprofile_keywords\"]')\n search_bar.send_keys(Keys.DOWN, Keys.RETURN)\n sleep(4)\n\n try:\n xpath = '//*[@id=\"_esgratingsprofile_esg-ratings-profile-header\"]/div[2]/div[1]/div[2]/div'\n esg_score = bot.find_element(xpath)\n company = bot.find_element(\n '//*[@id=\"_esgratingsprofile_esg-ratings-profile-header\"]/div[1]/div[1]')\n temp = _append_dict(temp)\n\n except NoSuchElementException:\n bot.append_empty_values(msci)\n\n # Save the data into a csv file\n df1 = bot.convert_dict_to_csv(msci, 'MSCI')\n",
"\"\"\" CSR HUB website Scrape\n\nThis script allows the user to scrape the companies' CSR ratings from\nthe CSR HUB website. Website link: \"https://www.csrhub.com/search/name/\"\n\nThis tool accepts Company's names list in comma separated value\nfile (.csv) format as input.\n\nThis script requires that `pandas` be installed within the Python\nenvironment you are running this script in.\n\nThe output is a .csv file with Company name and its corresponding CSR ratings\n\"\"\"\n\nimport pandas as pd\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom tqdm import tqdm\nfrom .scraper import WebScraper\n\n\ndef _append_dict() -> dict:\n ''' Append the CSR dictionary with Company Name and its CSR score\n Returns\n -------\n dict\n The CSR dictionary\n '''\n try:\n csr_score = bot.find_element(\n '//*[@id=\"wrapper\"]/div[3]/section[3]/div[2]/table/tbody/tr[2]/td[2]')\n csr['CSR_Ratings'].append(csr_score.text)\n company = bot.find_element(\n '//*[@id=\"wrapper\"]/div[3]/section[3]/div[2]/table/tbody/tr[2]/td[1]/a')\n csr['CSR_Company'].append(company.text)\n\n except NoSuchElementException:\n bot.append_empty_values(csr)\n return csr\n\n\n# Read Forbes dataset\ndf = pd.read_csv('Forbes.csv', index_col=0)\ndata_length = len(df)\nheader_name = 'Name'\n\n# Set up driver\nURL = \"https://www.csrhub.com/search/name/\"\nbot = WebScraper(URL)\n\n# Accept cookies\ncookies_xpath = '//*[@id=\"body-content-holder\"]/div[2]/div/span[2]/button'\nbot.accept_cookies(cookies_xpath)\n\n# Scrape the website. Extract company names and their respective CSR score\ni = 0\nprogress_bar = tqdm(total=data_length)\nwhile i < data_length:\n csr = {'CSR_Company': [], 'CSR_Ratings': []}\n delay = 2 # seconds\n\n try:\n search_bar = bot.send_request_to_search_bar(\n header_name, df, i, xpath='//*[@id=\"search_company_names_0\"]')\n search_bar.send_keys(Keys.RETURN)\n sleep(1)\n csr = _append_dict()\n # Save the data into a csv file\n df1 = bot.convert_dict_to_csv(csr, 'csr_hub')\n i += 1\n # If no element found, the page is restarted\n except NoSuchElementException:\n bot = bot.restart_driver(cookies_xpath)\n\n sleep(0.1)\n progress_bar.update(1)\n\nprogress_bar.close()\n"
] |
[
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] |
ananyak100-dev/Transformer-Explainability
|
[
"75b5f34276f9a840f98df8a87c3387fa55147acf"
] |
[
"philly_exp/ViT/perturbationMetricsNew.py"
] |
[
"import os\n# os.chdir(f'./Transformer-Explainability')\n#\n# !pip install -r requirements.txt\n\nfrom PIL import Image\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport torch\nimport numpy as np\n\nmodels = ['DINO/DINO_Small Perturbations', 'DeiT/DeiT_Small Perturbations', 'DeiT/DeiT_Base Perturbations', 'ViT/ViT_Base Perturbations']\nposNeg = ['False', 'True']\n\nROOT_DIR = os.environ['AMLT_DATA_DIR']\n\nfor model in models:\n for arg in posNeg:\n OUTPUT_DIR = ROOT_DIR + \"/output/\" + model + '/' + arg\n\n\n\n\n\nprint(np.mean(num_correct_model_full), np.std(num_correct_model_full))\nprint(np.mean(dissimilarity_model_full), np.std(dissimilarity_model_full))\nprint(np.mean(num_correct_perturb_full, axis=1), np.std(num_correct_perturb_full, axis=1))\nprint(np.mean(dissimilarity_perturb_full, axis=1), np.std(dissimilarity_perturb_full, axis=1))\n\nmeans_num_correct_perturb = np.mean(num_correct_perturb_full, axis=1)\nprint('Means:')\nfor mean in means_num_correct_perturb:\n print(mean)\n\nnp.save(os.path.join(OUTPUT_PERTURB_DIR, 'means_num_correct_perturb_full.npy'), means_num_correct_perturb)\n\nfrom numpy import savetxt\nsavetxt(os.path.join(OUTPUT_PERTURB_DIR, 'means_num_correct_perturb_full_csv.csv'), means_num_correct_perturb, delimiter=',')\n\n# AUC\n\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\nx = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\ny = [100 * i for i in means_num_correct_perturb]\n\nprint('AUC: ')\nprint(metrics.auc(x, y))\n\nplt.plot(x, y)\nplt.xlabel('Perturbation Levels')\nplt.ylabel('Accuracy (%)')\nplt.savefig(OUTPUT_PERTURB_DIR + '/plot.png')"
] |
[
[
"sklearn.metrics.auc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
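A small, self-contained check of the trapezoidal AUC call used in the perturbation script above (the accuracy values are invented):

```python
# sklearn.metrics.auc integrates y over x with the trapezoidal rule, as used above.
from sklearn import metrics

x = [0.1, 0.2, 0.3, 0.4, 0.5]        # perturbation levels
y = [90.0, 80.0, 65.0, 50.0, 30.0]   # made-up accuracy (%) at each level
print(metrics.auc(x, y))             # area under the accuracy-vs-perturbation curve
```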
Isaac-W/vision-measurement
|
[
"0133a135c0a40785975eb3b433ad570f31ed88e0"
] |
[
"test.py"
] |
[
"import peakutils\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n'''\ndata = [-0.15238721652,\n 0.226482259955,\n 0.526641892155,\n 0.590276999704,\n 0.391593530271,\n 0.149487519326,\n -0.0464324239059,\n -0.214412432486,\n -0.399365551117,\n -0.78249236592,\n -1.39743628254,\n -2.12135372321,\n -2.4414543013,\n -2.10576560302,\n -1.45197676735,\n -0.718998380028,\n -0.0489282142727,\n 0.329713080875,\n 0.632288112611,\n 0.813656834684,\n 0.788783912896,\n 0.583532737115,\n 0.26490990456,\n -0.0550061028609,\n -0.204124245458,\n -0.413068508087,\n -0.413068508087,\n -2.51865641418,\n -3.19472318458,\n -3.25479975779,\n -3.35534026774,\n -2.03007689523,\n -1.14272760807,\n -0.460896379405,\n 0.115842032703,\n 0.454346076784,\n 0.696686886974,\n 0.831096319069,\n 0.870736806389,\n 0.574412251486,\n 0.200983011821,\n -0.053325352345,\n -0.272528390764,\n -0.532293961852,\n -1.09188729712,\n -1.95314294359,\n -2.91710141937,\n -2.47373197362,\n -2.04441831455,\n -1.34801016396,\n -0.470370067137,\n 0.0994785276813,\n 0.456221035689,\n 0.650423019675,\n 0.478678636502,\n 0.200997779093,\n -0.0464496402702,\n -0.208785044251,\n -0.330719074996,\n -0.550990357407,\n -0.87196620878,\n -1.71307475742,\n -1.91851771846,\n -2.88890405543,\n -2.66298086624,\n -1.92714431099,\n -1.06139833397,\n -0.29624950148,\n 0.180283998186,\n 0.526295849222,\n 0.673532656157,\n 0.426462186417,\n 0.060120159328,\n -0.14594738248,\n -0.269081387275,\n -0.404828588061,\n -0.742044810136,\n -0.742044810136,\n -1.98679944735,\n -2.81202239783,\n -2.86007533844,\n -2.35135790845,\n -1.34669868327,\n -0.602662592149,\n 0.00170301190652,\n 0.00170301190652,\n 1.26181157041,\n 0.751985827159,\n 0.454792491419,\n 0.14107860018,\n -0.096446361373,\n -0.321997987567,\n -0.512380739993,\n -0.512380739993,\n -1.46137288168,\n -2.30506888031,\n -2.85600776016,\n -3.11585066214,\n -2.84500557881,\n -1.99540337077,\n -1.99540337077,\n -1.99540337077,\n 0.419423165143,\n 0.511663196427,\n 0.473687578856,\n 0.309711668411,\n -0.0304144752569,\n -0.227464653615,\n -0.45853074006,\n -1.11898357561,\n -1.11898357561,\n -2.08920896931,\n -2.87885024538,\n -2.58651190777,\n -2.17506212147,\n -1.30980861482,\n -1.30980861482,\n 0.053867457816,\n 0.468634558648,\n 0.724129201681,\n 0.640776100176,\n 0.371220972916,\n 0.00062589024688,\n -0.183872193313,\n -0.361346044001,\n -0.595823005208,\n -1.10714576345,\n -1.6499208654,\n -2.58106532646,\n -2.61920660828,\n -2.18349097801,\n -1.28511828926,\n -1.28511828926,\n -0.0349441314052,\n 0.366163090718,\n 0.700846185742,\n 0.725981238323,\n 0.430724819408,\n -0.013151347623,\n -0.219383581303,\n -0.355442491807,\n -0.601167618242,\n -1.29136920468,\n -1.82792576011,\n -3.06477211856,\n -2.78974033952,\n -2.17532750576,\n -2.17532750576,\n -2.17532750576,\n 0.0100824452739,\n 0.358600267654]\n'''\n\n'''\ndata = [-2.52224738206,\n -2.43286966597,\n -1.84749290261,\n -1.17561330528,\n -0.257505541003,\n 0.225763359075,\n 0.484975179579,\n 0.590294697296,\n 0.538122687123,\n 0.389105930861,\n 0.127409021954,\n -0.0976112044392,\n -0.282028968142,\n -0.606184580378,\n -1.21115775443,\n -2.17626466557,\n -2.64932321828,\n -2.3906022696,\n -1.60525961606,\n -0.900653908075,\n -0.0220441546468,\n 0.378001400458,\n 0.621486801155,\n 0.676242611087,\n 0.486036211774,\n 0.125416263581,\n -0.201245178062,\n -0.535102228662,\n -1.18602770271,\n -2.1829433139,\n -2.6433974597,\n -2.30850139356,\n -1.74219710589,\n -0.809943952804,\n -0.0304982752981,\n 0.413889553232,\n 0.662663460059,\n 
0.808114596491,\n 0.804205469722,\n 0.612341290937,\n 0.23741741988,\n -0.0824271628307,\n -0.35051682177,\n -0.917671536605,\n -1.8090418013,\n -2.42988862532,\n -2.28857768152,\n -1.77367461676,\n -1.12489412763,\n -0.169460620914,\n 0.246098649879,\n 0.476429573705,\n 0.569721963755,\n 0.443159016559,\n 0.166218891789,\n -0.116946105081,\n -0.263111393389,\n -0.541731029045,\n -1.31177208794,\n -2.12509680506,\n -2.60491281536,\n -2.37254638148,\n -1.8483109875,\n -1.03621026182,\n -0.132681072493,\n 0.25723614515,\n 0.529346614309,\n 0.684379308347,\n 0.651173584636,\n 0.376075580057,\n 0.018171763631,\n -0.18927552813,\n -0.367259824133,\n -0.734740923219,\n -1.37424534445,\n -2.48113020771,\n -2.75365237332,\n -2.47676967334,\n -1.8209902897,\n -1.01338176439,\n -0.127981726016,\n 0.235411140932,\n 0.45409844315,\n 0.487381711888,\n 0.389440168451,\n 0.17190962931,\n -0.0895438424741,\n -0.297179155264,\n -0.696916022791,\n -1.49920403376,\n -2.26056581485,\n -2.29691651749,\n -1.88536642124,\n -1.18099757478,\n -0.350096671141,\n 0.147834639032,\n 0.387157151381,\n 0.551642564063,\n 0.611877913525,\n 0.524816070093,\n 0.27062489373,\n -0.0200760928386,\n -0.269903933463,\n -0.788851580541,\n -1.58962734054,\n -2.38247410085,\n -2.49917598204,\n -1.98293509099,\n -1.36584393133,\n -0.509170389181,\n 0.114705686173,\n 0.4184421981,\n 0.596063867754,\n 0.637323253302,\n 0.501799178021,\n 0.195284110981,\n -0.109178484199,\n -0.315635048922,\n -0.704592326398,\n -1.6024720669,\n -2.40932347925,\n -2.49212632566,\n -2.07146003545,\n -1.39615349565,\n -0.59679090194,\n 0.0931138403406,\n 0.379811963829,\n 0.583768923804,\n 0.712685259493,\n 0.649596743732,\n 0.374521951738,\n 0.0229532428255,\n -0.272619431026,\n -0.701669750037,\n -1.5358550592,\n -2.21298678879,\n -2.36449220513,\n -2.02572333913,\n -1.31249100245,\n -0.573434543623,\n 0.0898082658308,\n 0.371075758249,\n 0.483610377373,\n 0.435925163413,\n 0.307692746221,\n 0.0833775484509,\n -0.0991633544532,\n -0.25621845783,\n -0.714702905138,\n -1.5903042854,\n -2.45064814922]\n'''\n\ndata = [0.0481802960076,\n 0.294296947338,\n 0.447603903886,\n 0.497841706227,\n 0.488011054591,\n 0.414856557444,\n 0.319857697008,\n 0.227989585792,\n 0.150080238132,\n 0.0963145079273,\n 0.0325458419301,\n -0.026162991363,\n -0.108760603109,\n -0.205062781053,\n -0.296605486984,\n -0.411350522941,\n -0.474483437651,\n -0.566470551165,\n -0.66857461351,\n -0.777354999752,\n -0.914522890134,\n -1.0628316454,\n -1.24209135536,\n -1.37850954009,\n -1.55137088677,\n -1.63345414127,\n -1.64196569337,\n -1.71034238332,\n -1.68061067731,\n -1.64735387151,\n -1.71400480781,\n -1.76222090958,\n -1.89883918653,\n -1.95592900229,\n -1.95910369219,\n -1.88932385873,\n -1.84742151112,\n -1.6528244753,\n -1.34418612144,\n -1.00906558786,\n -0.65518761106,\n -0.286683281017,\n 0.0630678787807,\n 0.317467957894,\n 0.459065748629,\n 0.535303237891,\n 0.515398348064,\n 0.429868610637,\n 0.323547124786,\n 0.232088643539,\n 0.180041055865,\n 0.118336729484,\n 0.0823596871819,\n 0.0394898189686,\n -0.00284911683641,\n -0.071185956993,\n -0.14314799998,\n -0.21094491473,\n -0.290457253214,\n -0.375884940913,\n -0.449572492701,\n -0.508757299921,\n -0.59014227449,\n -0.663052089228,\n -0.732749359701,\n -0.8215459586,\n -0.911016090211,\n -0.99386395345,\n -1.11846985198,\n -1.24599020129,\n -1.34885479018,\n -1.47409820792,\n -1.56540002294,\n -1.63434473928,\n -1.69467149811,\n -1.7707020886,\n -1.8181415291,\n -1.88469963783,\n -1.89482575382,\n -1.95691091999,\n 
-2.01269152385,\n -2.03748909499,\n -1.99907697927,\n -1.94164694152,\n -1.86382585217,\n -1.78233203214,\n -1.67199085142,\n -1.58990771303,\n -1.45667641643,\n -1.32472807726,\n -1.12799901573,\n -0.933604599536,\n -0.797868401248,\n -0.637242291198,\n -0.433128421343,\n -0.250632415388,\n -0.0697693771625,\n 0.0874335035309,\n 0.210369193452,\n 0.309461372126,\n 0.361639781721,\n 0.376612584821,\n 0.335387076321,\n 0.283403126364,\n 0.191682292752,\n 0.104898703386,\n -0.0129178495232,\n -0.11895847851,\n -0.241182905009,\n -0.341899903288,\n -0.447073195383,\n -0.544199725148,\n -0.636881660595,\n -0.731214271112,\n -0.850415962409,\n -0.980946144218,\n -1.10239354196,\n -1.26547279504,\n -1.4084542865,\n -1.49030936299,\n -1.60038261326,\n -1.65637193332,\n -1.71803285252,\n -1.75232242529,\n -1.81739802389,\n -1.87595058581,\n -1.87451759293,\n -1.99825849199,\n -2.07071294165,\n -2.14065521897,\n -2.14100885619,\n -2.16872491579,\n -2.17743269223,\n -2.10132955094,\n -2.05114423522,\n -1.93187594484,\n -1.73449266555,\n -1.51925240505,\n -1.30091072479,\n -1.03890030068,\n -0.791984719846,\n -0.554750443134,\n -0.345584095961,\n -0.163299555784,\n -0.0315076612649,\n 0.0815511716345,\n 0.179165172242,\n 0.246650138283,\n 0.277007247098]\n\n# Smooth the data\nwindow_size = 5\nsmooth_filter = np.array([1 / float(window_size) for x in range(window_size)])\nsmoothed = np.convolve(np.array(data), smooth_filter, mode='valid')\ndata = smoothed\n\nx_vals = [x for x in range(0, len(data))]\n\n# Get peaks (for recoil)\npeaks = peakutils.indexes(np.array(data), 0.2, 5)\npeaks = peaks.astype(int).tolist()\n\n# Get troughs (depth peaks)\nneg_data = [-x for x in data]\ntroughs = peakutils.indexes(np.array(neg_data), 0.2, 5)\ntroughs = troughs.astype(int).tolist()\n\n#rate_points = peakutils.interpolate(x_vals, neg_data, troughs)\nrate_points = troughs\n\navg_rate = 0\nfor i in range(1, len(rate_points)):\n # Get time between peaks and convert to rate\n avg_rate += ((len(data) / 5.0) * 60) / float(rate_points[i] - rate_points[i - 1])\nif len(rate_points) > 1:\n avg_rate /= (len(rate_points) - 1)\n\nplt.plot(x_vals, data)\nplt.plot(peaks, [data[x] for x in peaks], 'rx')\nplt.plot(troughs, [data[x] for x in troughs], 'bx')\nplt.show()\n\npass\n"
] |
[
[
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
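A minimal sketch of the peak/trough detection pattern used in test.py above, applied to a synthetic sine wave (the threshold and minimum distance are copied from the script; the signal is invented):

```python
# peakutils.indexes finds maxima; troughs are found by negating the signal, as in test.py above.
import numpy as np
import peakutils

t = np.linspace(0, 4 * np.pi, 400)
signal = np.sin(t)

peaks = peakutils.indexes(signal, 0.2, 5)      # indices of local maxima
troughs = peakutils.indexes(-signal, 0.2, 5)   # indices of local minima via the negated signal
print(peaks, troughs)
```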
kennethwdk/PINet
|
[
"3a0abbd653146c56e39612384891c94c3fb49b35"
] |
[
"lib/core/trainer.py"
] |
[
"import logging\nimport time\nimport torch\n\nfrom loss.heatmaploss import HeatmapLoss\nfrom loss.offsetloss import OffsetLoss\nfrom loss.refineloss import RefineLoss\n\nclass Trainer(object):\n def __init__(self, cfg, model, rank, output_dir):\n self.model = model\n self.output_dir = output_dir\n self.rank = rank\n self.print_freq = cfg.PRINT_FREQ\n self.refine = cfg.REFINE.USE_REFINE\n self.max_num_proposal = cfg.REFINE.MAX_PROPOSAL\n\n self.heatmap_loss = HeatmapLoss()\n self.offset_loss = OffsetLoss()\n self.refine_loss = RefineLoss()\n self.heatmap_loss_weight = cfg.LOSS.HEATMAP_LOSS_FACTOR\n self.offset_loss_weight = cfg.LOSS.OFFSET_LOSS_FACTOR\n self.refine_loss_weight = cfg.LOSS.REFINE_LOSS_FACTOR\n\n def train(self, epoch, data_loader, optimizer):\n logger = logging.getLogger(\"Training\")\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n heatmap_loss_meter = AverageMeter()\n offset_loss_meter = AverageMeter()\n if self.refine:\n refine_loss_meter = AverageMeter()\n\n self.model.train()\n\n end = time.time()\n for i, (images, heatmaps, kpt_masks, detkpt_maps, detkpt_masks, offsets, weights) in enumerate(data_loader):\n data_time.update(time.time() - end)\n\n heatmaps, kpt_masks, detkpt_maps, detkpt_masks, offsets, weights = heatmaps.cuda(non_blocking=True), kpt_masks.cuda(non_blocking=True), detkpt_maps.cuda(non_blocking=True), detkpt_masks.cuda(non_blocking=True), offsets.cuda(non_blocking=True), weights.cuda(non_blocking=True)\n\n gt_inds, gt_offsets, reg_weights = get_gt_proposals(heatmaps, detkpt_maps, offsets, weights, max_num_proposal=self.max_num_proposal)\n batch_inputs = {}\n batch_inputs.update({'images': images})\n batch_inputs.update({'gt_inds': gt_inds})\n outputs = self.model(batch_inputs)\n\n pred_heatmaps, pred_detkptmaps, pred_offsets = outputs[0:3]\n pred_heatmaps_all = torch.cat((pred_heatmaps, pred_detkptmaps), dim=1)\n gt_heatmaps = torch.cat((heatmaps, detkpt_maps), dim=1)\n gt_masks = torch.cat((kpt_masks, detkpt_masks), dim=1)\n\n heatmap_loss = self.heatmap_loss(pred_heatmaps_all, gt_heatmaps, gt_masks)\n offset_loss = self.offset_loss(pred_offsets, offsets, weights)\n if self.refine:\n refine_offsets = outputs[3]\n refine_loss = self.refine_loss(refine_offsets, gt_offsets, reg_weights)\n\n loss = 0\n if heatmap_loss is not None:\n heatmap_loss = heatmap_loss.mean(dim=0) * self.heatmap_loss_weight\n heatmap_loss_meter.update(heatmap_loss.item(), images.size(0))\n loss = loss + heatmap_loss\n if offset_loss is not None:\n offset_loss = offset_loss * self.offset_loss_weight\n offset_loss_meter.update(offset_loss.item(), images.size(0))\n loss = loss + offset_loss\n if self.refine:\n refine_loss = refine_loss * self.refine_loss_weight\n refine_loss_meter.update(refine_loss.item(), images.size(0))\n loss = loss + refine_loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % self.print_freq == 0 and self.rank == 0:\n msg = 'Epoch: [{0}][{1}/{2}]\\t' \\\n 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\\t' \\\n 'Speed: {speed:.1f} samples/s\\t' \\\n 'Data: {data_time.val:.3f}s ({data_time.avg:.3f}s)\\t' \\\n '{heatmaps_loss}{offset_loss}'.format(\n epoch, i, len(data_loader),\n batch_time=batch_time,\n speed=images.size(0) / batch_time.val,\n data_time=data_time,\n heatmaps_loss=_get_loss_info(heatmap_loss_meter, 'heatmap'),\n offset_loss=_get_loss_info(offset_loss_meter, 'offset')\n )\n if self.refine:\n msg += _get_loss_info(refine_loss_meter, 'refine')\n 
logger.info(msg)\n\ndef _get_loss_info(meter, loss_name):\n msg = '{name}: {meter.val:.3e} ({meter.avg:.3e})\\t'.format(name=loss_name, meter=meter)\n return msg\n\ndef get_gt_proposals(heatmaps, detkptmaps, offsets, weights, pos_inds=None, max_num_proposal=200):\n b, c, h, w = heatmaps.shape\n num_det = detkptmaps.size(1)\n num_joints = heatmaps.size(1)\n step = num_joints * 2\n if pos_inds is None:\n pos_inds = []\n for i in range(num_det):\n offset_w = (detkptmaps[:, i:i+1, :, :] * torch.max(weights[:, i*step:(i+1)*step, :, :], dim=1, keepdim=True)[0]).view(b, -1)\n num_nonzero = (offset_w > 0).sum(1).min().item()\n if num_nonzero == 0: num_nonzero = max_num_proposal // num_det\n num_nonzero = min(max_num_proposal // num_det, num_nonzero)\n _, pos_ind = offset_w.topk(num_nonzero, dim=1)\n pos_inds.append(pos_ind)\n\n gt_offsets, reg_weights = [], []\n step = num_joints * 2\n for i in range(num_det):\n gt_offset = offsets[:, i*step:(i+1)*step, :, :].permute(0, 2, 3, 1).reshape(b, h * w, step)\n gt_offset = gt_offset[torch.arange(b, device=offsets.device)[:, None], pos_inds[i]]\n gt_offsets.append(gt_offset)\n reg_weight = weights[:, i*step:(i+1)*step, :, :].permute(0, 2, 3, 1).reshape(b, h * w, step)\n reg_weight = reg_weight[torch.arange(b, device=offsets.device)[:, None], pos_inds[i]]\n reg_weights.append(reg_weight)\n gt_offsets = torch.cat(gt_offsets, dim=1)\n reg_weights = torch.cat(reg_weights, dim=1)\n\n return pos_inds, gt_offsets, reg_weights\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count if self.count != 0 else 0"
] |
[
[
"torch.max",
"torch.arange",
"torch.cat"
]
] |
Kishan-07/Hexagonal-Flow-Patterns-in-RBC
|
[
"421fc2e9c42c779b6f918e41d4443cbb080a0a3c"
] |
[
"hex.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n# Function that returns the time derivative of all the seven modes. Input requires an array of the modes values in the following order: [U011, U111, U122, T011, T111, T122, T002].\ndef f(x):\n global Ra, Pr\n \n dU011_dt = -np.sqrt(3.0/7)*x[1]*x[2] - np.sqrt(0.5)*Ra*Pr*x[3] - 2*Pr*x[0]\n dU111_dt = -0.5*np.sqrt(3.0/7)*x[2]*x[0] - np.sqrt(0.5)*Ra*Pr*x[4] - 2*Pr*x[1]\n dU122_dt = np.sqrt(3.0/7)*x[0]*x[1] - np.sqrt(3.0/7)*Ra*Pr*x[5] - 7*Pr*x[2]\n \n dT011_dt = -np.sqrt(0.5)*x[1]*x[5] - np.sqrt(2)*x[0]*x[6] - np.sqrt(0.5)*x[0] - 2*x[3]\n dT111_dt = -0.5*np.sqrt(0.5)*x[0]*x[5] - np.sqrt(2)*x[1]*x[6] - np.sqrt(0.5)*x[1] - 2*x[4]\n dT122_dt = 0.5*np.sqrt(0.5)*(x[0]*x[4] + x[1]*x[3]) - np.sqrt(3.0/7)*x[2] - 7*x[5]\n dT002_dt = 4*np.sqrt(2)*x[1]*x[4] + 2*np.sqrt(2)*x[0]*x[3] - 4*x[6]\n\n return np.array([dU011_dt, dU111_dt, dU122_dt, dT011_dt, dT111_dt, dT122_dt, dT002_dt])\n \n\n# This function produces and saves a plot showing the temperature and velocity distribution at a given time. The input requires an array X of the modes values in the following order: [U011, U111, U122, T011, T111, T122, T002]. The second input 'i' is the index value of time in the array T at which the plot is being made.\ndef plot(X, i):\n global x, y, xv, yv, dt, no\n \n z = np.pi/3 # Plot the z = pi/3 plane\n \n # Defining T, u and v fields. u is the x-component of velocity and v is the y-component.\n T = 4*X[3]*np.cos(y)*np.sin(z) + 8*X[4]*np.cos(np.sqrt(3)*x/2)*np.cos(0.5*y)*np.sin(z) + 8*X[5]*np.cos(np.sqrt(3)*x/2)*np.cos(1.5*y)*np.sin(2*z) + 2*X[6]*np.sin(2*z)\n \n u = 2*np.sqrt(6)*X[1]*np.sin(np.sqrt(3)*xv/2)*np.cos(0.5*yv)*np.cos(z) + 8/np.sqrt(7)*X[2]*np.sin(np.sqrt(3)*xv/2)*np.cos(1.5*yv)*np.cos(2*z)\n \n v = 2*np.sqrt(2)*X[0]*np.sin(yv)*np.cos(z) + 2*np.sqrt(2)*X[1]*np.cos(np.sqrt(3)*xv/2)*np.sin(0.5*yv)*np.cos(z) + 8*np.sqrt(3.0/7)*X[2]*np.cos(np.sqrt(3)*xv/2)*np.sin(1.5*yv)*np.cos(2*z)\n \n # Plot the density plot for temperature field and quiver plot for velocity field\n plt.close()\n fig = plt.figure()\n ax = plt.gca()\n heatmap = ax.pcolormesh(x, y, T, cmap = cm.jet, vmin = -25, vmax = 25)\n cbar = plt.colorbar(heatmap, orientation='vertical')\n cbar.set_label(r'$\\theta$', fontsize = 14, rotation = 0)\n cbar.ax.tick_params(labelsize=12)\n ax.quiver(xv, yv, u, v, scale = 3000)\n \n ax.set_title(r't = %.3f, $|\\theta|_{max}$ = %.3f' %(i*dt, np.max(np.abs(T))), fontsize=14)\n ax.set_xlabel(r'$x$', fontsize=14)\n ax.set_ylabel(r'$y$', fontsize=14)\n plt.xticks([0, 2*np.pi, 4*np.pi], fontsize=12)\n plt.yticks([0, 2*np.pi, 4*np.pi], fontsize=12)\n filename = '~/T_%03d.png' %(i/no)\n plt.savefig(filename)\n\n return\n\n\n# Rayleigh and Prandtl no.\nRa = 40\nPr = 10\n\n# Time range t and time-step dt\nt = 20\ndt = 0.0001\nN = int(t/dt) + 1\nno = (N-1)/250 # 'no' is the number of steps after which a plot is saved. 
A total of 251 frames will be saved to the user's computer.\n\nti = 10 # Time from which time series of U and T modes is to be plotted (in the end)\ntf = 20 # Time till which time series of U and T modes is to be plotted (in the end)\n\nX = np.zeros([7, N]) # Array to store the 7 modes for all the times from 0 to t.\nT = np.linspace(0, t, N) # Array for time\nX[:, 0] = np.array([1, 0.1, 0, 1, 0.1, 0, 0]) # Initial values in the following order: [U011, U111, U122, T011, T111, T122, T002]\n\n# Meshgrid for plotting the density plot for temperature.\nx = np.linspace(0, 4*np.pi, 101)\ny = np.linspace(0, 4*np.pi, 101)\nx, y = np.meshgrid(x, y)\n\n# Meshgrid for plotting the quiver plot for velocity.\nxv = np.linspace(0, 4*np.pi, 11)\nyv = np.linspace(0, 4*np.pi, 11)\nxv, yv = np.meshgrid(xv, yv)\n\nplot(X[:, 0], 0)\n\n# Evaluate the modes for all times.\nfor i in range(1, N):\n k1 = f(X[:, i-1])\n k2 = f(X[:, i-1] + k1*dt/2)\n k3 = f(X[:, i-1] + k2*dt/2)\n k4 = f(X[:, i-1] + k3*dt)\n\n X[:, i] = X[:, i-1] + dt*(k1 + 2*k2 + 2*k3 + k4)/6\n \n if i % no == 0:\n plot(X[:, i], i)\n\n# Plot the time series for the U modes and save to user's computer. We start from t = 25 (to remove the transients)\nplt.close()\nfig = plt.figure()\nax = plt.gca()\nax.plot(T[ti*10000:tf*10000], X[0, ti*10000:tf*10000], 'r-', T[ti*10000:tf*10000], X[1, ti*10000:tf*10000], 'b-', T[ti*10000:tf*10000], X[2, ti*10000:tf*10000], 'g-')\nplt.xticks(fontsize = 12)\nplt.yticks(fontsize = 12)\nax.set_xlabel(r'$t$', fontsize = 14)\nplt.legend([r'U$_{011}$', r'U$_{111}$', r'U$_{122}$'], loc='best')\nax.set_title(r'Ra = %d, Pr = %d' %(Ra, Pr), fontsize = 16)\nax.set_xlim([ti, tf])\nplt.plot([ti, tf], [0, 0], 'k--')\nfilename = '~/U.png'\nplt.savefig(filename)\n\n# Plot the time series for the T modes and save to user's computer. We start from t = 25 (to remove the transients)\nplt.close()\nfig = plt.figure()\nax = plt.gca()\nax.plot(T[ti*10000:tf*10000], X[3, ti*10000:tf*10000], 'r-', T[ti*10000:tf*10000], X[4, ti*10000:tf*10000], 'b-', T[ti*10000:tf*10000], X[5, ti*10000:tf*10000], 'g-', T[ti*10000:tf*10000], X[6, ti*10000:tf*10000], 'k-')\nplt.xticks(fontsize = 12)\nplt.yticks(fontsize = 12)\nax.set_xlabel(r'$t$', fontsize = 14)\nplt.legend([r'$\\theta_{011}$', r'$\\theta_{111}$', r'$\\theta_{122}$', r'$\\theta_{002}$'], loc='best')\nax.set_title(r'Ra = %d, Pr = %d' %(Ra, Pr), fontsize = 16)\nax.set_xlim([ti, tf])\nplt.plot([ti, tf], [0, 0], 'k--')\nfilename = '~/T.png'\nplt.savefig(filename)\n\nplt.close()\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"numpy.meshgrid",
"numpy.abs",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
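The time stepping in hex.py above is a classical fourth-order Runge-Kutta update; as a hedged generic sketch of that scheme (the decay example is invented, and `f` stands for any dx/dt function):

```python
# Generic RK4 step matching the k1..k4 update used in hex.py above.
import numpy as np

def rk4_step(f, x, dt):
    k1 = f(x)
    k2 = f(x + 0.5 * dt * k1)
    k3 = f(x + 0.5 * dt * k2)
    k4 = f(x + dt * k3)
    return x + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0

# Example: one step of dx/dt = -x starting from x = 1 (exact answer: exp(-0.1) ~ 0.9048).
print(rk4_step(lambda x: -x, np.array([1.0]), 0.1))
```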
CRitter93/beehyve
|
[
"8e21dda571cfecb8ec903b8db79e200d744806a2"
] |
[
"beehyve/steps.py"
] |
[
"from ast import literal_eval\nfrom typing import Any, Dict, Tuple\n\nimport pandas as pd\nfrom behave import given, register_type, then, when\nfrom behave.runner import Context\nfrom behave_pandas import table_to_dataframe\n\nfrom beehyve import types\nfrom beehyve.functions import add_var, get_var, raises_error, run_func\n\nDEFAULT_COLUMN_LEVEL = 1\n\nregister_type(Tuple=types.parse_tuple)\nregister_type(Dict=types.parse_dict)\nregister_type(Result=types.parse_func_result)\nregister_type(Module=types.parse_module)\n\n\n@given(\"the following table is loaded into dataframe {name:w}\")\n@raises_error\ndef step_load_table_into_df(context: Context, name: str) -> None:\n \"\"\"Loads and parses a behave table into a :class:`pandas.DataFrame`\n using `behave_pandas <https://pypi.org/project/behave-pandas/>`_.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param name: the name of the variable to which the dataframe should be assigned\n :type name: str\n \"\"\"\n add_var(\n context,\n name,\n table_to_dataframe(context.table, column_levels=DEFAULT_COLUMN_LEVEL),\n )\n\n\n@given(\n \"the CSV file {file_name} is loaded into dataframe {name:w} (read_csv kwargs: {kwargs:Dict})\"\n)\n@raises_error\ndef step_load_csv_into_df_with_kwargs(\n context: Context, file_name: str, name: str, kwargs: Dict[str, str]\n) -> None:\n \"\"\"Loads a CSV into a :class:`pandas.DataFrame` with additional kwargs.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param file_name: the name of the CSV file to load\n :type file_name: str\n :param name: the name of the variable to which the dataframe should be assigned\n :type name: str\n :param kwargs: a dict containing kwargs to be passed to :func:`pandas.read_csv`\n :type kwargs: Dict[str, str]\n \"\"\"\n add_var(context, name, pd.read_csv(file_name, **kwargs))\n\n\n@given(\"the CSV file {file_name} is loaded into dataframe {name:w}\")\n@raises_error\ndef step_load_csv_into_df(context: Context, file_name: str, name: str) -> None:\n \"\"\"Loads a CSV into a :class:`pandas.DataFrame`.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param file_name: the name of the CSV file to load\n :type file_name: str\n :param name: the name of the variable to which the dataframe should be assigned\n :type name: str\n \"\"\"\n add_var(context, name, pd.read_csv(file_name))\n\n\n@given(\"the value {val} is loaded into variable {name:w}\")\n@raises_error\ndef step_load_value_into_variable(context: Context, val: Any, name: str) -> None:\n \"\"\"Loads a value into a variable.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param val: a value to be loaded\n :type val: Any\n :param name: the name of the variable to which the value should be assigned\n :type name: str\n \"\"\"\n add_var(context, name, literal_eval(val))\n\n\n@given(\"the following variables are loaded\")\n@raises_error\ndef step_load_table_into_variables(context: Context) -> None:\n \"\"\"Loads a table of values into variables.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :raises ValueError: if the given table does not contain the necessary columns 'val' and 'var'\n \"\"\"\n table = context.table\n\n if \"var\" not in table.headings and \"val\" not in table.headings:\n raise ValueError(\n \"table has to contain a 'val' and 'var' column\"\n ) # pragma: no cover\n\n for row in table:\n add_var(context, row[\"var\"], 
literal_eval(row[\"val\"]))\n\n\n@when(\n \"the function {func_name:w} of module {module:Module} is called writing the results to {result_vars:Result}\"\n)\n@raises_error\ndef step_run_function(\n context: Context, func_name: str, module: str, result_vars: Tuple[str]\n) -> None:\n \"\"\"Executes an arbitrary python function using the variables in the context.\n For more details see :func:`run_func`.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param func_name: the name of the function to execute\n :type func_name: str\n :param module: the module where the function can be found,\n i.e., you should be able to import <func_name> from <module>\n :type module: str\n :param result_vars: a tuple of variable names\n to which the result(s) of the function should be assigned\n :type result_vars: Tuple[str]\n \"\"\"\n run_func(context, func_name, module, result_vars)\n\n\n@when(\"the function {func_name:w} of module {module:Module} is called\")\n@raises_error\ndef step_run_function_wo_return(context: Context, func_name: str, module: str) -> None:\n \"\"\"Executes an arbitrary python function using the variables in the context.\n For more details see :func:`run_func`.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param func_name: the name of the function to execute\n :type func_name: str\n :param module: the module where the function can be found,\n i.e., you should be able to import <func_name> from <module>\n :type module: str\n \"\"\"\n run_func(context, func_name, module, ())\n\n\n@then(\"dataframe {name:w} is equal to\")\ndef step_df_equal_to_table(context: Context, name: str) -> None:\n \"\"\"Checks whether a dataframe is equal to the given table\n using :func:`pandas.testing.assert_frame_equal`.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param name: the name of the variable in which the dataframe is stored\n :type name: str\n \"\"\"\n pd.testing.assert_frame_equal(\n get_var(context, name),\n table_to_dataframe(context.table, column_levels=DEFAULT_COLUMN_LEVEL),\n )\n\n\n@then(\"dataframe {name1:w} is equal to dataframe {name2:w}\")\ndef step_df_equal_to_df(context: Context, name1: str, name2: str) -> None:\n \"\"\"Checks whether two dataframes are equal\n using :func:`pandas.testing.assert_frame_equal`.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param name1: the name of the variable in which the first dataframe is stored\n :type name1: str\n :param name2: the name of the variable in which the second dataframe is stored\n :type name2: str\n \"\"\"\n pd.testing.assert_frame_equal(get_var(context, name1), get_var(context, name2))\n\n\n@then(\"the value of variable {name:w} is {true_val}\")\n@raises_error\ndef step_variable_equal_to_value(context: Context, name: str, true_val: str) -> None:\n \"\"\"Checks whether a variable is equal to a value.\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param name: the name of the variable to check\n :type name: str\n :param true_val: the expected value\n :type true_val: str\n \"\"\"\n actual_val = get_var(context, name)\n assert actual_val == literal_eval(true_val), f\"actual value is {actual_val}\"\n\n\n@then(\"the type of variable {name:w} is {true_type:w}\")\n@raises_error\ndef step_variable_type_equal_to(context: Context, name: str, true_type: str) -> None:\n \"\"\"Checks whether the type of a variable matches a given type.\n\n 
:param context: the current context\n :type context: :class:`behave.runner.Context`\n :param name: the name of the variable to check\n :type name: str\n :param true_type: the expected type\n :type true_type: str\n \"\"\"\n actual_type = type(get_var(context, name)).__name__\n assert actual_type == true_type, f\"actual type is {actual_type}\"\n\n\n@given(\"an error is expected\")\ndef step_error_expected(context: Context):\n \"\"\"Sets up expecting an error in following steps.\n Do not use without :func:`step_exception_raised`!\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n \"\"\"\n context.error_expected = True\n\n\n@then(\"the exception {exception:w} was raised\")\ndef step_exception_raised(context: Context, exception: str):\n \"\"\"Checks whether a specific exception was raised in previous steps.\n Do not use without :func:`step_error_expected`!\n\n :param context: the current context\n :type context: :class:`behave.runner.Context`\n :param exception: the type of the expected exception\n :type exception: str\n :raises RuntimeError: if :func:`step_error_expected` was not executed as previous step\n \"\"\"\n if not context.error_expected:\n raise RuntimeError(\n \"this step should only be used when 'Given an error is expected' step is executed before\"\n ) # pragma: no cover\n\n assert context.exception_type, \"No exception has been thrown\"\n assert (\n context.exception_type == exception\n ), f\"Actual exception {context.exception_type} does not match expected exception {exception}\"\n"
] |
[
[
"pandas.read_csv"
]
] |
svip-lab/IVOS-W
|
[
"4b66f14e140d395107ba95a343b1d4fe25b6751d"
] |
[
"utils/config_manet/config.py"
] |
[
"import torch\nimport argparse\nimport os\nimport sys\nimport cv2\nimport time\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n# parser = argparse.ArgumentParser(description='intvos config')\n# parser.add_argument('--ROOT_DIR' ,type=str, default=os.path.abspath(os.path.join(os.path.dirname(\"__file__\"))))\n# parser.add_argument('--EXP_NAME', type=str, default='deeplabv3+coco')\n# parser.add_argument('--SAVE_RESULT_DIR',type=str,default='../afs/result/')\n# parser.add_argument('--SAVE_VOS_RESULT_DIR',type=str,default='')\n# parser.add_argument('--NUM_WORKER',type=int,default=1)\n# parser.add_argument('--KNNS',type=int,default=1)\n# parser.add_argument('--PRETRAINED_MODEL',type=str,default='./model_best.pth.tar')\n# parser.add_argument('--RESULT_ROOT',type=str,default= os.path.join('../afs/vos_result/result_total_80000'))\n# ######DATA_CONFIG\n# parser.add_argument('--DATA_NAME',type=str,default= 'COCO2017')\n# parser.add_argument('--DATA_AUG' ,type=str2bool,default= True)\n# parser.add_argument('--DATA_WORKERS',type=int,default=4)\n# parser.add_argument('--DATA_RESCALE',type=int,default= 416)\n# parser.add_argument('--DATA_RANDOMCROP',type=int,default = 416)\n# parser.add_argument('--DATA_RANDOMROTATION',type=int,default = 0)\n# parser.add_argument('--DATA_RANDOM_H',type=int,default= 10)\n# parser.add_argument('--DATA_RANDOM_S',type=int,default = 10)\n# parser.add_argument('--DATA_RANDOM_V' ,type=int,default= 10)\n# parser.add_argument('--DATA_RANDOMFLIP',type=float, default=0.5)\n# parser.add_argument('--DATA_ROOT',type=str,default= '../data/DAVIS')\n#\n# ######MODEL_CONFIG\n# parser.add_argument('--MODEL_NAME',type=str,default = 'deeplabv3plus')\n# parser.add_argument('--MODEL_BACKBONE',type=str,default = 'res101_atrous')\n# parser.add_argument('--MODEL_OUTPUT_STRIDE',type=int,default = 16)\n# parser.add_argument('--MODEL_ASPP_OUTDIM',type=int,default = 256)\n# parser.add_argument('--MODEL_SHORTCUT_DIM',type=int,default = 48)\n# parser.add_argument('--MODEL_SHORTCUT_KERNEL',type=int,default = 1)\n# parser.add_argument('--MODEL_NUM_CLASSES',type=int,default = 21)\n# parser.add_argument('--MODEL_SEMANTIC_EMBEDDING_DIM',type=int,default=100)\n# parser.add_argument('--MODEL_HEAD_EMBEDDING_DIM',type=int,default=256)\n# parser.add_argument('--MODEL_LOCAL_DOWNSAMPLE',type=str2bool,default=True)\n# parser.add_argument('--MODEL_MAX_LOCAL_DISTANCE',type=int,default=12)\n# parser.add_argument('--MODEL_SELECT_PERCENT',type=float,default=0.8)\n# parser.add_argument('--MODEL_USEIntSeg',type=str2bool,default=False)\n#\n# ######TRAIN_CONFIG\n# parser.add_argument('--TRAIN_LR',type=float,default = 0.0007)\n# parser.add_argument('--TRAIN_LR_GAMMA',type=float,default = 0.1)\n# parser.add_argument('--TRAIN_MOMENTUM',type=float,default = 0.9)\n# parser.add_argument('--TRAIN_WEIGHT_DECAY',type=float,default = 0.00004)\n# parser.add_argument('--TRAIN_POWER',type=float,default = 0.9)\n# parser.add_argument('--TRAIN_BATCH_SIZE',type=int,default = 2)\n# parser.add_argument('--TRAIN_SHUFFLE',type=str2bool,default = True)\n# parser.add_argument('--TRAIN_CLIP_GRAD_NORM',type=float,default= 5.)\n# parser.add_argument('--TRAIN_MINEPOCH',type=int,default = 9)\n# parser.add_argument('--TRAIN_TOTAL_STEPS',type=int,default=101000)\n# parser.add_argument('--TRAIN_LOSS_LAMBDA',type=int,default = 0)\n# 
parser.add_argument('--TRAIN_TBLOG',type=str2bool,default = False)\n# parser.add_argument('--TRAIN_BN_MOM',type=float,default = 0.0003)\n# parser.add_argument('--TRAIN_TOP_K_PERCENT_PIXELS',type=float,default=0.15)\n# parser.add_argument('--TRAIN_HARD_MINING_STEP',type=int,default=50000)\n# parser.add_argument('--TRAIN_LR_STEPSIZE',type=int,default=2000)\n# parser.add_argument('--TRAIN_INTER_USE_TRUE_RESULT',type=str2bool,default=True)\n# parser.add_argument('--TRAIN_RESUME_DIR',type=str,default='')\n#\n# parser.add_argument('--LOG_DIR',type=str,default = os.path.join('./log'))\n#\n# parser.add_argument('--TEST_CHECKPOINT',type=str,default='save_step_100000.pth')\n# parser.add_argument('--TEST_MODE',type=str2bool,default=False)\n#\n# cfg=parser.parse_args()\n# cfg.TRAIN_EPOCHS=int(200000*cfg.TRAIN_BATCH_SIZE/60.)\n\n# transform\ncfg = argparse.Namespace()\nd = vars(cfg)\nd['ROOT_DIR'] = os.path.abspath(os.path.join(os.path.dirname(\"__file__\")))\nd['EXP_NAME'] = 'deeplabv3+coco'\nd['SAVE_RESULT_DIR'] = '../afs/result/'\nd['SAVE_VOS_RESULT_DIR'] = ''\nd['NUM_WORKER'] = 1\nd['KNNS'] = 1\nd['PRETRAINED_MODEL'] = './model_best.pth.tar'\nd['RESULT_ROOT'] = os.path.join('../afs/vos_result/result_total_80000')\n\n######DATA_CONFIG\nd['DATA_NAME'] = 'COCO2017'\nd['DATA_AUG'] = True\nd['DATA_WORKERS'] = 4\nd['DATA_RESCALE'] = 416\nd['DATA_RANDOMCROP'] = 416\nd['DATA_RANDOMROTATION'] = 0\nd['DATA_RANDOM_H'] = 10\nd['DATA_RANDOM_S'] = 10\nd['DATA_RANDOM_V'] = 10\nd['DATA_RANDOMFLIP'] = 0.5\nd['DATA_ROOT'] = '../data/DAVIS'\n\n######MODEL_CONFIG\nd['MODEL_NAME'] = 'deeplabv3plus'\nd['MODEL_BACKBONE'] = 'res101_atrous'\nd['MODEL_OUTPUT_STRIDE'] = 16\nd['MODEL_ASPP_OUTDIM'] = 256\nd['MODEL_SHORTCUT_DIM'] = 48\nd['MODEL_SHORTCUT_KERNEL'] = 1\nd['MODEL_NUM_CLASSES'] = 21\nd['MODEL_SEMANTIC_EMBEDDING_DIM'] = 100\nd['MODEL_HEAD_EMBEDDING_DIM'] = 256\nd['MODEL_LOCAL_DOWNSAMPLE'] = True\nd['MODEL_MAX_LOCAL_DISTANCE'] = 12\nd['MODEL_SELECT_PERCENT'] = 0.8\nd['MODEL_USEIntSeg'] = False\n\n######TRAIN_CONFIG\nd['TRAIN_LR'] = 0.0007\nd['TRAIN_LR_GAMMA'] = 0.1\nd['TRAIN_MOMENTUM'] = 0.9\nd['TRAIN_WEIGHT_DECAY'] = 0.00004\nd['TRAIN_POWER'] = 0.9\nd['TRAIN_BATCH_SIZE'] = 2\nd['TRAIN_SHUFFLE'] = True\nd['TRAIN_CLIP_GRAD_NORM'] = 5.\nd['TRAIN_MINEPOCH'] = 9\nd['TRAIN_TOTAL_STEPS'] = 101000\nd['TRAIN_LOSS_LAMBDA'] = 0\nd['TRAIN_TBLOG'] = False\nd['TRAIN_BN_MOM'] = 0.0003\nd['TRAIN_TOP_K_PERCENT_PIXELS'] = 0.15\nd['TRAIN_HARD_MINING_STEP'] = 50000\nd['TRAIN_LR_STEPSIZE'] = 2000\nd['TRAIN_INTER_USE_TRUE_RESULT'] = True\nd['TRAIN_RESUME_DIR'] = ''\n\nd['LOG_DIR'] = os.path.join('./log')\nd['TEST_CHECKPOINT'] = 'save_step_100000.pth'\nd['TEST_MODE'] = False\nd['TRAIN_EPOCHS'] = int(200000*cfg.TRAIN_BATCH_SIZE/60.)\n\n\n\nif not torch.cuda.is_available():\n raise ValueError('config.py: cuda is not avalable')\n\n\n"
] |
[
[
"torch.cuda.is_available"
]
] |
Vrushank264/Averting-from-CNNs
|
[
"b04b65a09394849771c1943bbb9fe840767d296a"
] |
[
"Involution/InvNet.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nfrom Involution import Involution2D\r\nfrom torchsummary import summary\r\nfrom typing import Union, Tuple, Optional\r\n\r\nclass Bottleneck(nn.Module):\r\n \r\n def __init__(self,\r\n in_channels: int,\r\n out_channels: int,\r\n expansion: int = 4,\r\n stride: Union[int, Tuple[int,int]] = (1,1),\r\n dilation: Union[int, Tuple[int,int]] = (1,1),\r\n downsample: Optional[nn.Module] = None,\r\n ):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n in_channels : int\r\n Input Channels.\r\n out_channels : int\r\n Output Channels.\r\n expansion : int, optional\r\n The ratio of out_channels/mid_channels where\r\n mid_channels are the input/output channels of \r\n conv2. The default is 4.\r\n stride : Union[int, Tuple[int,int]], optional\r\n Stride of the block. The default is (1,1).\r\n dilation : Union[int, Tuple[int,int]], optional\r\n Dilation of Convolution. The default is (1,1).\r\n downsample : Optional[nn.Module], optional\r\n Downsample operation on identity branch. The default is None.\r\n\r\n \"\"\"\r\n \r\n super(Bottleneck, self).__init__()\r\n \r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.expansion = expansion\r\n assert out_channels % expansion == 0\r\n self.mid_channels = out_channels // expansion\r\n self.stride = stride\r\n self.dilation = dilation\r\n \r\n self.conv1 = nn.Conv2d(in_channels = in_channels,\r\n out_channels = self.mid_channels,\r\n kernel_size = (1,1),\r\n stride = (1,1),\r\n padding = (0,0), \r\n bias = False)\r\n self.bn1 = nn.BatchNorm2d(self.mid_channels)\r\n \r\n self.conv2 = Involution2D(channels = self.mid_channels,\r\n kernel_size = 7,\r\n stride = stride)\r\n \r\n self.bn2 = nn.BatchNorm2d(self.mid_channels)\r\n \r\n self.conv3 = nn.Conv2d(in_channels = self.mid_channels,\r\n out_channels = out_channels,\r\n kernel_size = 1,\r\n bias = False)\r\n self.bn3 = nn.BatchNorm2d(out_channels)\r\n \r\n self.relu = nn.ReLU(inplace = True)\r\n self.downsample = downsample\r\n \r\n def forward(self, x):\r\n \r\n identity = x\r\n \r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n \r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n \r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n \r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n \r\n out += identity\r\n out = self.relu(out)\r\n \r\n return out\r\n \r\nclass ResidualLayer(nn.Sequential):\r\n \r\n def __init__(self, \r\n block,\r\n num_blocks,\r\n in_channels,\r\n out_channels,\r\n expansion = 4,\r\n stride = 1,\r\n avg_down = False,\r\n **kwargs\r\n ) -> nn.Sequential:\r\n \"\"\"\r\n \r\n Parameters\r\n ----------\r\n block : int\r\n Residual block used to build ResidualLayer.\r\n num_blocks : int\r\n Number of blocks.\r\n in_channels : int\r\n Block's Input Channels.\r\n out_channels : int\r\n Block's Output Channels.\r\n expansion : int, optional\r\n The expansion for Bottleneck. The default is 4.\r\n stride : int, optional\r\n Stride of the first block. The default is 1.\r\n avg_down : bool\r\n Use Average pool instead of stride convolution. 
The default is False.\r\n\r\n Returns\r\n -------\r\n nn.Sequential (list of Residual layers)\r\n\r\n \"\"\"\r\n \r\n self.block = block\r\n self.expansion = expansion\r\n \r\n downsample = None\r\n \r\n if stride != 1 or in_channels != out_channels:\r\n downsample = []\r\n conv_stride = stride\r\n if avg_down and stride!=1:\r\n conv_stride = 1\r\n downsample.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))\r\n \r\n downsample.extend([\r\n nn.Conv2d(in_channels,\r\n out_channels,\r\n kernel_size = 1,\r\n stride = conv_stride,\r\n bias = False),\r\n nn.BatchNorm2d(out_channels)\r\n ])\r\n downsample = nn.Sequential(*downsample)\r\n \r\n layers = []\r\n \r\n layers.append(\r\n block(\r\n in_channels,\r\n out_channels,\r\n expansion = self.expansion, \r\n stride = stride,\r\n downsample = downsample\r\n ))\r\n in_channels = out_channels\r\n \r\n for i in range(1, num_blocks):\r\n \r\n layers.append(\r\n block(\r\n in_channels,\r\n out_channels,\r\n expansion = self.expansion,\r\n stride = 1\r\n )\r\n )\r\n super(ResidualLayer, self).__init__(*layers)\r\n \r\nclass InvNet(nn.Module):\r\n \r\n arch = {\r\n 26: (Bottleneck, (1, 2, 4, 1)),\r\n 38: (Bottleneck, (2, 3, 5, 2)),\r\n 50: (Bottleneck, (3, 4, 6, 3)),\r\n 101: (Bottleneck, (3, 4, 23, 3)),\r\n 152: (Bottleneck, (3, 8, 36, 3))\r\n }\r\n \r\n def __init__(self,\r\n depth,\r\n num_classes,\r\n in_channels = 3,\r\n stem_channels = 64,\r\n base_channels = 64,\r\n expansion = 4,\r\n num_stages = 4,\r\n strides = (1,2,2,2),\r\n dilations = (1,1,1,1),\r\n out_indices = (3, ),\r\n avg_down = False,\r\n zero_init_residual = True\r\n ):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n depth : int\r\n Network Depth {18, 34, 50, 101, 152}.\r\n num_classes : int\r\n Number of classes.\r\n in_channels : int, optional\r\n DESCRIPTION. The default is 3.\r\n stem_channels : int, optional\r\n Output channels for the stem layer. The default is 64.\r\n base_channels : TYPE, optional\r\n Middle Channels for the first stage. The default is 64.\r\n expansion : int, optional\r\n expansion for the bottleneck block. The default is 4.\r\n num_stages : int, optional\r\n Stages of the network. The default is 4.\r\n strides : tuple, optional\r\n Strides of the first blocks of each stage. The default is (1,2,2,2).\r\n dilations : tuple, optional\r\n Dilation of each stage. The default is (1,1,1,1).\r\n out_indices : tuple, optional\r\n Output from which stages, if only one stage is specified,\r\n a single tensor(feature map) is returned, \r\n otherwise a tuple of tensors will be returned. The default is (3, ).\r\n avg_down : bool, optional\r\n Use Average pool instead of strided convolution when downsampling \r\n in bottleneck. The default is False.\r\n zero_init_residual : bool, optional\r\n Whether to use zero init for last batchnorm layer\r\n in res blocks or not. 
The default is True.\r\n\r\n \"\"\"\r\n \r\n super(InvNet, self).__init__()\r\n if depth not in self.arch:\r\n raise KeyError(f'Invalid depth {depth} for InvNet.')\r\n \r\n self.num_classes = num_classes\r\n self.depth = depth\r\n self.stem_channels = stem_channels\r\n self.base_channels = base_channels\r\n self.num_stages = num_stages\r\n assert num_stages >=1 and num_stages <=4\r\n self.strides = strides\r\n self.out_indices = out_indices\r\n assert max(out_indices) < num_stages\r\n self.avg_down = avg_down\r\n self.zero_init_residual = zero_init_residual\r\n self.expansion = 4\r\n assert self.expansion != 0\r\n self.block, stage_blocks = self.arch[depth]\r\n self.stage_blocks = stage_blocks[:num_stages]\r\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\r\n self.dropout = nn.Dropout2d(0.2)\r\n self.fc1 = nn.Linear(512 * self.expansion, 1000)\r\n self.fc2 = nn.Linear(1000, num_classes)\r\n self.stem_layer(in_channels, stem_channels)\r\n \r\n self.res_layers = []\r\n in_c = stem_channels\r\n out_c = base_channels * self.expansion\r\n \r\n for i, num_blocks in enumerate(self.stage_blocks):\r\n \r\n stride = strides[i]\r\n dilation = dilations[i]\r\n res_layer = self.make_res_layer(\r\n block = self.block,\r\n num_blocks = num_blocks,\r\n in_channels = in_c,\r\n out_channels = out_c,\r\n expansion = self.expansion,\r\n stride = stride,\r\n dilation = dilation,\r\n avg_down = self.avg_down,\r\n )\r\n in_c = out_c\r\n out_c *= 2\r\n layer_name = f'layer{i+1}'\r\n self.add_module(layer_name, res_layer)\r\n self.res_layers.append(layer_name)\r\n \r\n def make_res_layer(self, **kwargs):\r\n return ResidualLayer(**kwargs)\r\n \r\n def stem_layer(self, in_channels, stem_channels):\r\n \r\n self.stem = nn.Sequential(nn.Conv2d(in_channels,\r\n stem_channels // 2,\r\n kernel_size = 3,\r\n stride = 2, \r\n padding = 1),\r\n nn.BatchNorm2d(stem_channels // 2),\r\n nn.ReLU(inplace=True),\r\n Involution2D(stem_channels // 2, \r\n kernel_size = 3,\r\n stride = 1),\r\n nn.BatchNorm2d(stem_channels // 2),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(stem_channels // 2,\r\n stem_channels,\r\n kernel_size =3,\r\n stride = 1,\r\n padding = 1),\r\n nn.BatchNorm2d(stem_channels),\r\n nn.ReLU(inplace=True))\r\n self.maxpool = nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1)\r\n \r\n def init_weights(self, pretrained = None):\r\n \r\n super(InvNet, self).init_weights(pretrained)\r\n if pretrained is None:\r\n for i in self.modules():\r\n if isinstance(i, nn.Conv2d):\r\n nn.init.kaiming_normal_(i, mode = 'fan_out', nonlinearity='relu')\r\n elif isinstance(i, nn.BatchNorm2d):\r\n nn.init.constant_(i, 1.0)\r\n \r\n if self.zero_init_residual:\r\n for i in self.modules():\r\n if isinstance(i, Bottleneck):\r\n nn.init.constant_(i.bn3, 0.0)\r\n \r\n def forward(self, x):\r\n \r\n x = self.stem(x)\r\n x = self.maxpool(x)\r\n outs = []\r\n for i, layer_name in enumerate(self.res_layers):\r\n \r\n res_layer = getattr(self, layer_name)\r\n x = res_layer(x)\r\n if i in self.out_indices:\r\n outs.append(x)\r\n \r\n if len(outs) == 1:\r\n outs = outs[0]\r\n else:\r\n outs = tuple(outs)\r\n \r\n outs = self.avgpool(outs)\r\n outs = torch.flatten(outs, 1)\r\n outs = self.fc1(outs)\r\n outs = self.dropout(outs)\r\n outs = self.fc2(outs)\r\n \r\n return outs\r\n \r\n \r\ndef test():\r\n \r\n x = torch.randn((1,3,64,64)).to(torch.device('cuda'))\r\n model = InvNet(18, num_classes=3).to(torch.device('cuda'))\r\n output = model(x)\r\n print(summary(model, (3,64,64)))\r\n print(output.shape)\r\n \r\nif __name__ == '__main__':\r\n \r\n 
test() \r\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.flatten",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.device",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
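
The InvNet record above builds each residual stage with an optional average-pool downsample shortcut (`avg_down`). Below is a minimal, self-contained sketch of that projection-shortcut idea using only the `torch.nn` calls listed for this entry (AvgPool2d, Conv2d, BatchNorm2d, Sequential); the function name and shapes are illustrative, not taken from the repository.

```python
import torch
import torch.nn as nn

def make_downsample(in_channels, out_channels, stride, avg_down=False):
    """Projection shortcut for a residual stage (illustrative sketch).

    With avg_down=True the spatial reduction is done by AvgPool2d and the
    1x1 convolution keeps stride 1, mirroring the avg_down branch in the
    code above.
    """
    layers = []
    conv_stride = stride
    if avg_down and stride != 1:
        conv_stride = 1
        layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                   ceil_mode=True, count_include_pad=False))
    layers += [
        nn.Conv2d(in_channels, out_channels, kernel_size=1,
                  stride=conv_stride, bias=False),
        nn.BatchNorm2d(out_channels),
    ]
    return nn.Sequential(*layers)

x = torch.randn(1, 64, 56, 56)
print(make_downsample(64, 256, stride=2, avg_down=True)(x).shape)  # torch.Size([1, 256, 28, 28])
```
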
StructuralNeurobiologyLab/LightConvPoint
|
[
"3f353f45e9e910fa390a74520dfd478e3e88f104"
] |
[
"lightconvpoint/networks/convpoint.py"
] |
[
"import torch\nimport torch.nn as nn\nimport lightconvpoint.nn as lcp_nn\n\n# This mdoels for classification and segmentation\n# are inspired from ConvPoint\n# https://github.com/aboulch/ConvPoint\n\n\nclass ConvPointCls(nn.Module):\n \"\"\"ConvPoint classification network.\n\n Network inspired from the KPConv paper and code (https://github.com/aboulch/ConvPoint)\n\n # Arguments\n in_channels: int.\n The number of input channels.\n out_channels: int.\n The number of output channels.\n ConvNet: convolutional layer.\n The convolutional class to be used in the network.\n Search: search algorithm.\n The search class to be used in the network.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, ConvNet, Search, **kwargs):\n super().__init__()\n\n # input 2048\n self.cv1 = lcp_nn.Conv(\n ConvNet(in_channels, 64, 16),\n Search(K=16, npoints=1024),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv2 = lcp_nn.Conv(\n ConvNet(64, 128, 16),\n Search(K=16, npoints=256),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n self.cv3 = lcp_nn.Conv(\n ConvNet(128, 256, 16),\n Search(K=16, npoints=64),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(256),\n )\n self.cv4 = lcp_nn.Conv(\n ConvNet(256, 256, 16),\n Search(K=16, npoints=16),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(256),\n )\n self.cv5 = lcp_nn.Conv(\n ConvNet(256, 512, 16),\n Search(K=16, npoints=1),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(512),\n )\n\n # last layer\n self.fcout = nn.Linear(512, out_channels)\n\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n\n def forward(self, x, input_pts, support_points=None, indices=None):\n\n if support_points is None:\n support_points = [None for _ in range(5)]\n if indices is None:\n indices = [None for _ in range(5)]\n\n x1, pts1, ids1 = self.cv1(x, input_pts, support_points[0], indices=indices[0])\n\n x2, pts2, ids2 = self.cv2(x1, pts1, support_points[1], indices=indices[1])\n\n x3, pts3, ids3 = self.cv3(x2, pts2, support_points[2], indices=indices[2])\n\n x4, pts4, ids4 = self.cv4(x3, pts3, support_points[3], indices=indices[3])\n\n x5, pts5, ids5 = self.cv5(x4, pts4, support_points[4], indices=indices[4])\n\n if x1 is not None:\n xout = x5.view(x5.size(0), -1)\n xout = self.dropout(xout)\n xout = self.fcout(xout)\n return xout\n else:\n return None, [ids1, ids2, ids3, ids4, ids5], [pts1, pts2, pts3, pts4, pts5]\n\n\nclass ConvPointSeg(nn.Module):\n \"\"\"ConvPoint segmentation network.\n\n Network inspired from the KPConv paper and code (https://github.com/aboulch/ConvPoint)\n\n # Arguments\n in_channels: int.\n The number of input channels.\n out_channels: int.\n The number of output channels.\n ConvNet: convolutional layer.\n The convolutional class to be used in the network.\n Search: search algorithm.\n The search class to be used in the network.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, ConvNet, Search):\n super().__init__()\n\n # input 8192 / 2048\n self.cv0 = lcp_nn.Conv(ConvNet(in_channels, 64, 16), Search(K=16)) # no stride\n self.cv1 = lcp_nn.Conv(\n ConvNet(64, 64, 16),\n Search(K=16, npoints=2048),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv2 = lcp_nn.Conv(\n ConvNet(64, 64, 16),\n Search(K=16, npoints=1024),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv3 = lcp_nn.Conv(\n ConvNet(64, 64, 16),\n Search(K=16, npoints=256),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv4 = lcp_nn.Conv(\n ConvNet(64, 
128, 16),\n Search(K=16, npoints=64),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n self.cv5 = lcp_nn.Conv(\n ConvNet(128, 128, 16),\n Search(K=16, npoints=16),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n self.cv6 = lcp_nn.Conv(\n ConvNet(128, 128, 16),\n Search(K=16, npoints=8),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n\n self.cv5d = lcp_nn.Conv(\n ConvNet(128, 128, 16),\n Search(K=4),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n self.cv4d = lcp_nn.Conv(\n ConvNet(256, 128, 16),\n Search(K=4),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(128),\n )\n self.cv3d = lcp_nn.Conv(\n ConvNet(256, 64, 16),\n Search(K=4),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv2d = lcp_nn.Conv(\n ConvNet(128, 64, 16),\n Search(K=8),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv1d = lcp_nn.Conv(\n ConvNet(128, 64, 16),\n Search(K=8),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n self.cv0d = lcp_nn.Conv(\n ConvNet(128, 64, 16),\n Search(K=8),\n activation=nn.ReLU(),\n normalization=nn.BatchNorm1d(64),\n )\n\n self.fcout = nn.Conv1d(128, out_channels, 1)\n self.drop = nn.Dropout(0.5)\n self.relu = nn.ReLU(inplace=True)\n self.features_out_size = 128\n\n def forward(\n self, x, input_pts, support_points=None, indices=None, return_features=False\n ):\n\n if support_points is None:\n support_points = [None for _ in range(13)]\n if indices is None:\n indices = [None for _ in range(13)]\n\n # ENCODER\n x0, pts0, ids0 = self.cv0(x, input_pts, input_pts, indices=indices[0])\n x1, pts1, ids1 = self.cv1(x0, pts0, support_points[0], indices=indices[1])\n x2, pts2, ids2 = self.cv2(x1, pts1, support_points[1], indices=indices[2])\n x3, pts3, ids3 = self.cv3(x2, pts2, support_points[2], indices=indices[3])\n x4, pts4, ids4 = self.cv4(x3, pts3, support_points[3], indices=indices[4])\n x5, pts5, ids5 = self.cv5(x4, pts4, support_points[4], indices=indices[5])\n x6, pts6, ids6 = self.cv6(x5, pts5, support_points[5], indices=indices[6])\n\n # DECODER\n x5d, _, ids5d = self.cv5d(x6, pts6, pts5, indices=indices[7])\n x5d = torch.cat([x5d, x5], dim=1) if x5d is not None else None\n x4d, _, ids4d = self.cv4d(x5d, pts5, pts4, indices=indices[8])\n x4d = torch.cat([x4d, x4], dim=1) if x4d is not None else None\n x3d, _, ids3d = self.cv3d(x4d, pts4, pts3, indices=indices[9])\n x3d = torch.cat([x3d, x3], dim=1) if x3d is not None else None\n x2d, _, ids2d = self.cv2d(x3d, pts3, pts2, indices=indices[10])\n x2d = torch.cat([x2d, x2], dim=1) if x2d is not None else None\n x1d, _, ids1d = self.cv1d(x2d, pts2, pts1, indices=indices[11])\n x1d = torch.cat([x1d, x1], dim=1) if x1d is not None else None\n x0d, _, ids0d = self.cv0d(x1d, pts1, input_pts, indices=indices[12])\n\n if x0d is not None:\n x0d = torch.cat([x0d, x0], dim=1)\n xout = self.drop(x0d)\n xout = self.fcout(xout)\n\n if return_features:\n return xout, x0d\n else:\n return xout\n\n else:\n return (\n None,\n [\n ids0,\n ids1,\n ids2,\n ids3,\n ids4,\n ids5,\n ids6,\n ids5d,\n ids4d,\n ids3d,\n ids2d,\n ids1d,\n ids0d,\n ],\n [pts1, pts2, pts3, pts4, pts5, pts6],\n )\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] |
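
The ConvPointSeg decoder above fuses encoder and decoder features with channel-wise `torch.cat`, guarded so that a pass returning `None` features still works. A minimal sketch of just that skip-connection pattern with plain tensors follows; the shapes are made up and the lightconvpoint layers are intentionally not used here.

```python
import torch

# Stand-in feature maps of shape (batch, channels, num_points).
x4d = torch.randn(2, 128, 64)   # upsampled decoder features
x4 = torch.randn(2, 128, 64)    # matching encoder features

# Skip connection as in ConvPointSeg: concatenate along the channel axis,
# keeping the None-guard used when the network only computes support points.
x4d = torch.cat([x4d, x4], dim=1) if x4d is not None else None
print(x4d.shape)  # torch.Size([2, 256, 64])
```
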
tnksh/pyqubo
|
[
"493841fd374ea31268e0fe9023043888ffa220d7"
] |
[
"pyqubo/utils/solver.py"
] |
[
"# Copyright 2018 Recruit Communications Co., Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dimod\nimport numpy as np\n\n\ndef solve_qubo(qubo, num_reads=10, num_sweeps=1000, beta_range=(1.0, 50.0)):\n \"\"\"Solve QUBO with Simulated Annealing (SA) provided by dimod.\n \n Args:\n qubo (dict[(label, label), float]): The QUBO to be solved.\n \n num_reads (int, default=10): Number of run repetitions of SA.\n \n num_sweeps (int, default=1000): Number of iterations in each run of SA.\n \n beta_range (tuple(float, float), default=(1.0, 50.0)): Tuple of start beta and end beta.\n \n Returns:\n dict[label, bit]: The solution of SA.\n \"\"\"\n max_abs_value = float(max(abs(v) for v in qubo.values()))\n scale_qubo = {k: float(v) / max_abs_value for k, v in qubo.items()}\n sa = dimod.reference.SimulatedAnnealingSampler()\n sa_computation = sa.sample_qubo(scale_qubo, num_reads=num_reads,\n num_sweeps=num_sweeps, beta_range=beta_range)\n best = np.argmin(sa_computation.record.energy)\n best_solution = list(sa_computation.record.sample[best])\n return dict(zip(sa_computation.variable_labels, best_solution))\n\n\ndef solve_ising(linear, quad, num_reads=10, num_sweeps=1000, beta_range=(1.0, 50.0)):\n \"\"\"Solve Ising model with Simulated Annealing (SA) provided by dimod.\n\n Args:\n linear (dict[label, float]): The linear parameter of the Ising model.\n \n quad (dict[(label, label), float]): The quadratic parameter of the Ising model.\n\n num_reads (int, default=10): Number of run repetitions of SA.\n\n num_sweeps (int, default=1000): Number of iterations in each run of SA.\n\n beta_range (tuple(float, float), default=(1.0, 50.0)): Tuple of start beta and end beta.\n\n Returns:\n dict[label, bit]: The solution of SA.\n \"\"\"\n max_abs_value = float(max(abs(v) for v in (list(quad.values()) + list(linear.values()))))\n scale_linear = {k: float(v) / max_abs_value for k, v in linear.items()}\n scale_quad = {k: float(v) / max_abs_value for k, v in quad.items()}\n sa = dimod.reference.SimulatedAnnealingSampler()\n sa_computation = sa.sample_ising(scale_linear, scale_quad, num_reads=num_reads,\n num_sweeps=num_sweeps, beta_range=beta_range)\n best = np.argmin(sa_computation.record.energy)\n best_solution = list(sa_computation.record.sample[best])\n return dict(zip(sa_computation.variable_labels, best_solution))\n"
] |
[
[
"numpy.argmin"
]
] |
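
The pyqubo solver record scales QUBO coefficients to unit magnitude and then picks the lowest-energy read with `numpy.argmin`. The sketch below reproduces only that post-processing with a hypothetical QUBO and fabricated sampler output, so no dimod call is needed.

```python
import numpy as np

# Hypothetical QUBO and annealing results, standing in for dimod output.
qubo = {("a", "a"): -1.0, ("b", "b"): -1.0, ("a", "b"): 2.0}

# Scale coefficients by the largest absolute value, as solve_qubo does.
max_abs = max(abs(v) for v in qubo.values())
scaled = {k: v / max_abs for k, v in qubo.items()}

# Pretend these came back from num_reads=3 simulated-annealing runs.
samples = [{"a": 1, "b": 1}, {"a": 1, "b": 0}, {"a": 0, "b": 0}]
energies = np.array([0.0, -0.5, 0.0])

best = np.argmin(energies)      # index of the lowest-energy read
print(scaled, samples[best])    # -> the {'a': 1, 'b': 0} solution
```
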
gibiee/random_navigate_agent
|
[
"2b9abb76b3f0821b709310229c7f5159a080620e"
] |
[
"demo.py"
] |
[
"import argparse\nimport os\nimport random\nimport time\nimport numpy as np\nimport pandas as pd\n\ndef agent_move(agent, map_size) :\n available_move = []\n pos_y, pos_x = agent\n if pos_y > 0 : available_move.append('up')\n if pos_y < map_size-1 : available_move.append('down')\n if pos_x > 0 : available_move.append('left')\n if pos_x < map_size-1 : available_move.append('right')\n\n move = random.choice(available_move)\n if move == 'up' : pos_y -= 1\n elif move == 'down' : pos_y += 1\n elif move == 'left' : pos_x -= 1\n elif move == 'right' : pos_x += 1\n\n return (pos_y, pos_x)\n\ndef display(map, map_size, agent, try_count) :\n print(f'{try_count}번째 시도 중...')\n\n pos_y, pos_x = agent\n display_map = pd.DataFrame(np.round(map, 3))\n display_map.iloc[pos_y, pos_x] = '◎'\n display_map.iloc[map_size-1, map_size-1] = '★'\n\n print(display_map)\n\ndef erase_display(sleep) :\n time.sleep(sleep)\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef update_reward(map, history, discount_rate) :\n reward = 1\n history.reverse()\n for hist in history :\n pos_y, pos_x = hist\n reward *= discount_rate\n map[pos_y, pos_x] = max(reward, map[pos_y, pos_x])\n\n return map\n\nif __name__ == \"__main__\" :\n parser = argparse.ArgumentParser(description='set parameters')\n parser.add_argument('--map_size', type=int, default=5, help='size of the map')\n parser.add_argument('--discount_rate', type=float, default=0.9, help='discount rate applied on reward')\n parser.add_argument('--display_sleep', type=float, default=0.1, help='sleep time when display')\n parser.add_argument('--update_sleep', type=float, default=1, help='sleep time when arrive at goal')\n args = parser.parse_args()\n\n map_size = args.map_size\n discount_rate = args.discount_rate\n display_sleep = args.display_sleep\n update_sleep = args.update_sleep\n\n try_count = 1\n map = np.zeros(shape=(map_size, map_size))\n while True :\n agent, history = (0,0), []\n while True : \n display(map, map_size, agent, try_count)\n if agent == (map_size-1, map_size-1) : \n print(f'\\n목적지 도착! steps : {len(history)}')\n print('map에 reward 반영 중...')\n map = update_reward(map, history, discount_rate)\n time.sleep(update_sleep)\n erase_display(display_sleep)\n break\n\n erase_display(display_sleep)\n\n agent = agent_move(agent, map_size)\n history.append(agent)\n\n try_count += 1\n\n\n\n \n\n\n"
] |
[
[
"numpy.round",
"numpy.zeros"
]
] |
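
The demo above backs up a discounted reward along the visited path once the agent reaches the goal. Here is a compact re-run of that `update_reward` rule on a small map with a made-up path; only the map size and path are assumptions.

```python
import numpy as np

map_values = np.zeros((3, 3))
history = [(0, 1), (1, 1), (2, 1), (2, 2)]   # path ending at the goal
discount_rate = 0.9

reward = 1.0
for pos_y, pos_x in reversed(history):        # walk back from the goal
    reward *= discount_rate
    map_values[pos_y, pos_x] = max(reward, map_values[pos_y, pos_x])

print(np.round(map_values, 3))
# the goal cell gets 0.9, the cell before it 0.81, and so on
```
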
mero2online/LAS_Handler
|
[
"07cf4c010f292320327278a97ff25d0101744419"
] |
[
"HandleLithoPercentLAS.py"
] |
[
"import os\nimport shutil\nimport lasio\nimport openpyxl\nimport xlwt\nimport xlrd\nfrom copy2 import copy2\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Alignment, Font, PatternFill\nfrom pandas import DataFrame\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nfrom openpyxl.utils import get_column_letter\n\nfrom my_const import *\nfrom HelperFunc import getFinalWellDate, resource_path, readLocalFile, writeLocalFile\nfrom NewCurvesData import newPerCurves, newPerLithCurves, modPerCurves, newPerCurvesDSG\nfrom GetFunc import convertNULL, GET_LITHO_EMPTY, Get_DSG_Formula, GetDSG_LAS_Header, getNewPerWellDSG, GetDSG_LAS_Header_ColorCode\n\n\ndef gen_litho_Percent_LAS(filename, start_depth):\n las = lasio.read(filename)\n\n wellNameOriginal = las.well.WELL.value\n finalWellName = wellNameOriginal[wellNameOriginal.find(\n '(')+len('('):wellNameOriginal.rfind(')')]\n las.well.WELL = finalWellName\n\n finalWellDate = getFinalWellDate()\n las.well.DATE = finalWellDate\n\n las.well.SRVC = 'EXLOG'\n\n # Remove Unused Curves\n for idx, x in enumerate(las.keys()):\n if (idx == 21 or idx == 29 or idx == 30 or idx == 31 or idx == 32 or idx == 33):\n las.delete_curve(x)\n\n # Convert NULL values and delete curve then append it\n for idx, x in enumerate(las.keys()):\n res = convertNULL(las[x])\n las.delete_curve(x)\n las.append_curve(newPerCurves[idx], res, descr=newPerCurves[idx])\n\n firstRow = ' '.join(las.keys())\n lasFilename = resource_path('draft.las')\n excelFilename = resource_path('draft.xlsx')\n csvFilename = resource_path('draft.txt')\n\n las.write(lasFilename, fmt='%.0f', len_numeric_field=5)\n las.to_excel(excelFilename)\n las.to_csv(csvFilename, units=False, delimiter='\\t')\n csvDraft = readLocalFile(csvFilename)\n csvDraftWitoutDecimal = csvDraft.replace('.0', '')\n writeLocalFile(resource_path(\n f'out\\\\DSG_FOR_GRAVITAS_CONVERTER.txt'), csvDraftWitoutDecimal)\n\n DSG()\n LITHOLOGY()\n\n trimLASandEXCEL(lasFilename, excelFilename, firstRow)\n\n lithology_gravitas_name = f'{finalWellName}_LITHOLOGY_{finalWellDate}_GRAVITAS'\n shutil.copy(resource_path('draft.las'), resource_path(\n f'out\\\\{lithology_gravitas_name}.las'))\n\n lasDraft = readLocalFile(lasFilename)\n lasDraftSplitted = lasDraft.splitlines()\n\n all_rows = []\n for idx, x in enumerate(lasDraftSplitted):\n if idx > 0:\n x = [int(i) for i in x.split()]\n all_rows.append(x)\n\n # load the excel file\n inBook = xlrd.open_workbook(\n resource_path('LITHOLOGY_GRAVITAS.xls'), formatting_info=True, on_demand=True)\n inSheet = inBook .sheet_by_index(0)\n\n # copy the contents of excel file\n outBook, outStyle = copy2(inBook)\n\n # open the first sheet\n w_sheet = outBook.get_sheet(0)\n\n for idx, row in enumerate(all_rows, 1):\n for idc, cell in enumerate(row):\n xf_index = inSheet.cell_xf_index(idx, idc)\n saved_style = outStyle[xf_index]\n w_sheet.write(idx, idc, cell, saved_style)\n\n outBook.get_sheet(0).name = f'{finalWellName}_LITHOLOGY_GRAVITAS'\n # save the file\n outBook.save(resource_path(\n f'out\\\\{lithology_gravitas_name}.xls'))\n\n if (start_depth):\n LITHOLOGY_GRAVITAS_Converted(\n lasFilename, finalWellName, finalWellDate, start_depth)\n\n#\n# LITHOLOGY_GRAVITAS_Converted\n#\n\n\ndef LITHOLOGY_GRAVITAS_Converted(lasFilename, finalWellName, finalWellDate, start_depth):\n lasDraft = readLocalFile(lasFilename)\n lasDraftSplitted = lasDraft.splitlines()\n\n firstNumRow = [int(i) for i in lasDraftSplitted[1].split()]\n filtered = []\n filtered.append(firstNumRow)\n for idx, x in 
enumerate(lasDraftSplitted):\n lastFiltered = filtered[len(filtered)-1]\n if idx > 0:\n x = [int(i) for i in x.split()]\n if x[1:len(x)-1] != lastFiltered[1:len(lastFiltered)-1]:\n filtered.append(x)\n else:\n filtered.pop()\n filtered.append(x)\n\n filtered.insert(0, [lasDraftSplitted[0].split()])\n\n result = []\n for idx, row in enumerate(filtered):\n if (idx == 0):\n headerRow = row[0]\n # headerRow[1:len(headerRow)] = headerRow[len(headerRow)-1:0:-1]\n else:\n matched = []\n # row[1:len(row)] = row[len(row)-1:0:-1]\n for id, val in enumerate(row):\n if id > 0:\n if val > 0:\n matched.append(val)\n top = getTop(result, row, idx, int(start_depth))\n base = row[0]\n right = val if len(matched) <= 1 else sum(matched)\n left = right - val\n result.append([top, base, left, right, headerRow[id]])\n\n result.insert(0, ['TOP', 'BASE', 'LEFT', 'RIGHT', 'LITHOTYPE'])\n\n finalFileName = f'{finalWellName}_LITHOLOGY_GRAVITAS_{finalWellDate}_Converted'\n wb = Workbook()\n ws1 = wb.active\n\n for row in result:\n ws1.append(row)\n wb.save(resource_path(f'out\\\\{finalFileName}.xlsx'))\n\n df = DataFrame(ws1.values)\n df.to_csv(resource_path(f'out\\\\{finalFileName}.txt'),\n index=False, header=False, sep='\\t')\n #\n # DSG\n #\n\n\ndef DSG():\n las = lasio.read(resource_path('draft.las'))\n\n # Convert all 'SNDSH' values to 0 and Change position of 'CEMENT'\n for idx, x in enumerate(las.keys()):\n if (idx == 18):\n data = [0]*len(las[x])\n las.delete_curve(x)\n las.insert_curve(18, x, data)\n elif (idx == 27):\n data = las[x]\n las.delete_curve(x)\n las.insert_curve(26, x, data)\n\n # Remove Unused Curves\n for idx, x in enumerate(las.keys()):\n if (idx == 9 or idx == 10 or idx == 16 or idx == 25):\n las.delete_curve(x)\n\n lasFilename = resource_path('draft_DSG.las')\n excelFilename = resource_path('draft_DSG.xlsx')\n\n las.to_excel(excelFilename)\n\n # Rename all curves for DSG by delete curve then insert it\n for idx, x in enumerate(las.keys()):\n mnemonic = newPerCurvesDSG[idx]['name']\n data = las[x] if idx <= 1 else las[x]+las[las.keys()[idx-1]]\n unit = 'F' if idx < 1 else '%'\n descr = newPerCurvesDSG[idx]['desc']\n las.delete_curve(x)\n las.insert_curve(idx, mnemonic, data, unit=unit, descr=descr)\n\n workbook = openpyxl.load_workbook(excelFilename)\n wsh = workbook['Curves']\n\n cellAlignment = Alignment(horizontal='center', vertical='center')\n for idx, cell in enumerate(wsh[\"1:1\"]):\n col_letter = get_column_letter(idx+1)\n fg = GetDSG_LAS_Header_ColorCode(idx)\n cell.value = las.keys()[idx]\n wsh.column_dimensions[col_letter].width = len(f\"{cell.value}\") * 1.5\n cell.alignment = cellAlignment\n cell.font = Font(name='Courier New')\n if fg:\n cell.fill = PatternFill(fgColor=fg, fill_type=\"solid\")\n for idx, cell in enumerate(wsh[col_letter]):\n cell.alignment = cellAlignment\n\n workbook.save(resource_path(excelFilename))\n\n las.insert_curve(\n 1, 'DEPTH_ORIG', las['DEPTH'], unit='F', descr='Depth Orig')\n\n las.well['STEP'].descr = 'STEP DEPTH'\n del las.well['PROV']\n\n cwd = os.getcwd()\n text = readLocalFile(f'{cwd}\\LAS_Handler_DSG_Config.csv')\n\n result = []\n\n for line in text.splitlines():\n result.append(line.split(\",\")[1])\n\n reAdd = ['UWI', 'WELL', 'COMP', 'FLD', 'LOC', 'SRVC', 'DATE']\n for x in reAdd:\n result.append(las.well[x].value)\n del las.well[x]\n\n newPerWellDSG = getNewPerWellDSG(result)\n for idx, x in enumerate(newPerWellDSG):\n las.well[newPerWellDSG[idx]['mnemonic']] = lasio.HeaderItem(\n mnemonic=newPerWellDSG[idx]['mnemonic'],\n 
value=newPerWellDSG[idx]['value'],\n descr=newPerWellDSG[idx]['descr'],\n unit=newPerWellDSG[idx]['unit'],)\n\n las.write(lasFilename, fmt='%.0f', len_numeric_field=5)\n\n txt = readLocalFile(lasFilename)\n startOne = '~Well ------------------------------------------------------'\n endOne = '~Curve Information -----------------------------------------'\n textOneLas = txt[txt.find(startOne)+len(startOne):txt.rfind(endOne)]\n\n startTwo = '~Curve Information -----------------------------------------'\n endTwo = '~Params ----------------------------------------------------'\n textTwoLas = txt[txt.find(startTwo)+len(startTwo):txt.rfind(endTwo)]\n\n finalDSG_Header = GetDSG_LAS_Header(textOneLas, textTwoLas)\n\n firstRow = '~A '+' '.join(las.keys())\n trimLASandEXCEL(lasFilename, excelFilename, firstRow)\n\n workbook = openpyxl.load_workbook(excelFilename)\n ws2Title = f\"{las.well['WELL'].value}_LITHOLOGY-DSG\"\n workbook.create_sheet(title=ws2Title)\n ws = workbook['Curves']\n ws.title = 'original values'\n ws1 = workbook['original values']\n ws2 = workbook[ws2Title]\n\n ws1.freeze_panes = ws1['A2']\n\n df = DataFrame(ws1.values)\n rows = dataframe_to_rows(df, index=False, header=False)\n for r_idx, row in enumerate(rows, 1):\n for c_idx, value in enumerate(row, 1):\n values = Get_DSG_Formula(r_idx)\n ws2.cell(row=r_idx, column=c_idx,\n value=value if r_idx == 1 else values[c_idx-1])\n\n ws2.insert_cols(2, 1)\n for x in range(len(ws1['A'])):\n i = x+1\n if x == 0:\n ws2[f'B{i}'] = 'Depth.org'\n else:\n ws2[f'B{i}'] = f'=A{i}'\n\n for idx, cell in enumerate(ws2[\"1:1\"]):\n col_letter = get_column_letter(idx+1)\n ws2.column_dimensions[col_letter].width = len(f\"{cell.value}\") * 1.5\n cell.alignment = cellAlignment\n cell.font = Font(name='Courier New')\n for idx, cell in enumerate(ws2[col_letter]):\n cell.alignment = cellAlignment\n\n finalFileNameXlsx = f'{las.well.WELL.value}_LITHOLOGY-DSG_GRAVITAS_{las.well.DATE.value}'\n finalFileNameLas = f'{las.well.WELL.value}_LITHOLOGY-DSG_{las.well.DATE.value}'\n # workbook.save(excelFilename)\n workbook.save(resource_path(f'out\\\\{finalFileNameXlsx}.xlsx'))\n\n finalData = readLocalFile(lasFilename)\n finalLAS = f'{finalDSG_Header}{finalData}'\n writeLocalFile(resource_path(f'out\\\\{finalFileNameLas}.las'), finalLAS)\n\n#\n# LITHOLOGY\n#\n\n\ndef LITHOLOGY():\n las = lasio.read(resource_path('draft.las'))\n newLas = lasio.LASFile()\n newLas.add_curve('DEPTH', las['DEPTH'], unit='ft', descr='1 Hole Depth')\n\n for idx, x in enumerate(newPerLithCurves):\n if idx <= 19:\n newLas.add_curve(newPerLithCurves[idx]['name'], las[modPerCurves[idx]],\n unit='%', descr=newPerLithCurves[idx]['desc'])\n else:\n newLas.add_curve(newPerLithCurves[idx]['name'], GET_LITHO_EMPTY(\n las['DEPTH']), unit='%', descr=newPerLithCurves[idx]['desc'])\n\n lasFilename = resource_path('draft_LITHOLOGY.las')\n excelFilename = resource_path('draft_LITHOLOGY.xlsx')\n firstRow = ' '.join(newLas.keys())\n\n newLas.write(lasFilename, fmt='%.0f', len_numeric_field=5)\n newLas.to_excel(excelFilename)\n\n txt = readLocalFile(resource_path('draft.las'))\n startOne = '~Well ------------------------------------------------------'\n endOne = '~Curve Information -----------------------------------------'\n textOneLas = txt[txt.find(startOne)+len(startOne):txt.rfind(endOne)]\n lith = readLocalFile(resource_path('draft_LITHOLOGY.las'))\n startTwo = '~Curve Information -----------------------------------------'\n endTwo = '~Params ----------------------------------------------------'\n 
textTwoLas = lith[lith.find(startTwo)+len(startTwo):lith.rfind(endTwo)]\n startThree = '~ASCII -----------------------------------------------------'\n textThreeLas = lith[lith.find(startThree)+len(startThree):len(lith)-1]\n finalData = f'{text1}{textOneLas}{text4}{textTwoLas}{text5}{textThreeLas}'\n\n finalFileName = f'{las.well.WELL.value}_LITHOLOGY_{las.well.DATE.value}'\n writeLocalFile(resource_path(f'out\\\\{finalFileName}.las'), finalData)\n\n wb = Workbook()\n ws1 = wb.active\n data = finalData.splitlines()\n for idx, row in enumerate(data):\n if idx <= 55:\n ws1.append([row])\n elif idx == 56:\n one = row.split()\n two = one[2:len(one)]\n two.insert(0, ' '.join([one[0], one[1]]))\n ws1.append(two)\n else:\n ws1.append([int(x) for x in row.split()])\n\n wb.save(resource_path(f'out\\\\{finalFileName}.xlsx'))\n\n trimLASandEXCEL(lasFilename, excelFilename, firstRow)\n\n\n# ########\n# Helper #\n# ########\n\n\ndef trimLASandEXCEL(lasFilename, excelFilename, firstRow):\n workbook = openpyxl.load_workbook(excelFilename)\n std = workbook.get_sheet_by_name('Header')\n workbook.remove_sheet(std)\n workbook.save(excelFilename)\n\n txt = readLocalFile(lasFilename)\n\n start = '~ASCII -----------------------------------------------------'\n data = txt[txt.find(start)+len(start):len(txt)-1]\n finalData = f'{firstRow}{data}'\n\n writeLocalFile(lasFilename, finalData)\n\n\ndef getTop(result, row, idx, start_depth):\n if idx == 1:\n return start_depth\n if (len(result) > 0 and ((row[0] - result[len(result)-1][1]) > 10)):\n return result[len(result)-1][1]\n elif (len(result) > 0 and result[len(result)-1][1]-result[len(result)-1][0] > 10) and row[0] == result[len(result)-1][1]:\n return result[len(result)-1][0]\n else:\n return row[0] - 10\n"
] |
[
[
"pandas.DataFrame"
]
] |
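
The LAS handler's `LITHOLOGY_GRAVITAS_Converted` turns per-depth lithology percentages into stacked (TOP, BASE, LEFT, RIGHT, LITHOTYPE) intervals. The sketch below shows that stacking idea in pure Python with made-up rows; it is simplified relative to the repository's `getTop`, which also handles 10-ft gaps and duplicate depths.

```python
# Illustrative reconstruction of the interval rows, with fabricated data.
header = ["DEPTH", "SAND", "SHALE", "LIME"]
rows = [
    [5010, 60, 40, 0],
    [5020, 30, 50, 20],
]

result = [["TOP", "BASE", "LEFT", "RIGHT", "LITHOTYPE"]]
prev_base = 5000                      # assumed start depth
for row in rows:
    base, cumulative = row[0], 0
    for name, val in zip(header[1:], row[1:]):
        if val > 0:
            left, right = cumulative, cumulative + val
            cumulative = right
            result.append([prev_base, base, left, right, name])
    prev_base = base

for line in result:
    print(line)
```
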
ReCAP-UTR/NLP
|
[
"792758a1e952c1b183644bf8d87c6eed2a048339"
] |
[
"nlp_service/client.py"
] |
[
"from __future__ import annotations\n\nimport typing as t\n\nimport numpy as np\nimport spacy\nfrom arg_services.base.v1 import base_pb2\nfrom arg_services.nlp.v1 import nlp_pb2\nfrom spacy.language import Language\nfrom spacy.tokens import Doc, DocBin, Span, Token # type: ignore\n\nfrom nlp_service import similarity\n\nDoc.set_extension(\"vector\", default=None)\nSpan.set_extension(\"vector\", default=None)\nToken.set_extension(\"vector\", default=None)\n\n\ndef blank(language: str, similarity_method: int = 0) -> Language:\n spacy_lang = spacy.blank(language)\n inject_pipes(spacy_lang, similarity_method)\n\n return spacy_lang\n\n\ndef docbin2docs(\n docbin_bytes: bytes, language: t.Union[str, Language], similarity_method: int = 0\n) -> t.Tuple[Doc, ...]:\n if isinstance(language, str):\n language = blank(language, similarity_method)\n\n docbin = DocBin().from_bytes(docbin_bytes)\n\n return tuple(docbin.get_docs(language.vocab))\n\n\ndef list2array(values: t.Iterable[float]) -> np.ndarray:\n return np.array(values)\n\n\ndef inject_vectors(\n doc: Doc,\n res: nlp_pb2.VectorResponse,\n) -> None:\n if res.document:\n doc._.set(\"vector\", list2array(res.document.vector))\n\n if res.sentences:\n for sent, sent_res in zip(doc.sents, res.sentences):\n sent._.set(\"vector\", list2array(sent_res.vector))\n\n if res.tokens:\n for token, token_res in zip(doc, res.tokens):\n token._.set(\"vector\", list2array(token_res.vector))\n\n\ndef inject_pipes(nlp: spacy.Language, similarity_method: int = 0) -> None:\n nlp.add_pipe(\"vector\", last=True)\n nlp.add_pipe(\"similarity\", last=True, config={\"method\": similarity_method})\n\n\[email protected](\"vector\")\ndef _vector_component(doc):\n func = lambda x: x._.vector\n\n doc.user_hooks[\"vector\"] = func\n doc.user_span_hooks[\"vector\"] = func\n doc.user_token_hooks[\"vector\"] = func\n\n return doc\n\n\[email protected](\"similarity\")\nclass SimilarityFactory:\n def __init__(self, nlp, name, method):\n if method:\n self.func = similarity.mapping[method]\n\n def __call__(self, doc):\n if self.func:\n doc.user_hooks[\"similarity\"] = self.func\n doc.user_span_hooks[\"similarity\"] = self.func\n doc.user_token_hooks[\"similarity\"] = self.func\n\n return doc\n"
] |
[
[
"numpy.array"
]
] |
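
The nlp_service client registers custom `vector` extensions on spaCy objects and fills them from a gRPC response. A minimal sketch of that extension-plus-injection pattern follows, assuming spaCy v3 and replacing the `VectorResponse` with a plain list of floats.

```python
import numpy as np
import spacy
from spacy.tokens import Doc

# Register the extension once, as the client module does at import time.
if not Doc.has_extension("vector"):
    Doc.set_extension("vector", default=None)

nlp = spacy.blank("en")
doc = nlp("a short example sentence")

fake_response_vector = [0.1, 0.2, 0.3]        # stand-in for res.document.vector
doc._.set("vector", np.array(fake_response_vector))
print(doc._.vector.shape)                     # (3,)
```
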
NajwaLaabid/kalman-jax
|
[
"7cd4d83f9c5a22008d2c565deefe3fa7ffd2005d"
] |
[
"kalmanjax/experiments/timings/results.py"
] |
[
"import pickle\nimport numpy as np\n\ntask_list = ['heteroscedastic', 'coal', 'banana', 'binary', 'audio', 'aircraft', 'rainforest']\n\nmethod_timings = np.zeros([10, 6])\nfor method in range(10):\n for task_num in range(6):\n task = task_list[task_num]\n if (task_num == 4) and method in [4, 5, 7, 9]:\n method_timings[method, task_num] = np.nan\n else:\n with open(\"output/\" + str(task) + \"_\" + str(method) + \".txt\", \"rb\") as fp:\n result = pickle.load(fp)\n # print(result)\n method_timings[method, task_num] = result\n\n# for fold in range(10):\n# with open(\"output/\" + str(15) + \"_\" + str(fold) + \"_nlpd.txt\", \"rb\") as fp:\n# print(pickle.load(fp))\n\nnp.set_printoptions(precision=3)\nprint(method_timings[:, :-1])\n# print(np.nanmean(method_nlpd, axis=1))\n# np.set_printoptions(precision=2)\n# print(np.std(method_nlpd, axis=1))\n# print(np.nanstd(method_nlpd, axis=1))\n"
] |
[
[
"numpy.set_printoptions",
"numpy.zeros"
]
] |
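
The timings script above fills a method-by-task matrix, marking combinations that were never run with NaN so they drop out of the averages. A small sketch of that NaN-aware bookkeeping with fabricated numbers:

```python
import numpy as np

method_timings = np.zeros((3, 4))
method_timings[1, 2] = np.nan                 # e.g. method 1 not run on task 2
method_timings[0] = [1.2, 3.4, 5.6, 7.8]

np.set_printoptions(precision=3)
print(method_timings)
print(np.nanmean(method_timings, axis=1))     # per-method mean, skipping NaNs
```
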
mourisl/DeepCAT
|
[
"67881c0961dc4336494c4000fc8be1680548b99a"
] |
[
"PrepareAdaptiveFile.py"
] |
[
"#! usr/bin/python\r\n\r\nimport os\r\nfrom os.path import exists\r\nimport numpy as np\r\nimport csv\r\nfrom csv import reader\r\nimport sys\r\n\r\nindir=sys.argv[1]\r\noutdir=sys.argv[2]\r\nthr=10000\r\n\r\n#def PrepareAdaptiveFile(indir,outdir,thr=10000):\r\nffs=os.listdir(indir)\r\nfor ff in ffs:\r\n if('.tsv' not in ff): \r\n continue\r\n ff0=ff\r\n if not os.path.exists(outdir):\r\n os.makedirs(outdir) \r\n str1='TestReal-' \r\n newff=outdir+'/'+str1+ff0\r\n# if exists(newff)==False:\r\n# continue\r\n csv_reader = reader(open(indir+'/'+ff,\"r\"), delimiter='\\t', quotechar=\"\\\"\") \r\n ddnew=[] \r\n for row in csv_reader:\r\n if '*' not in row[1]:\r\n if 'X' not in row[1]:\r\n# if '^C.+F$' not in row[1]:\r\n if (len(row[1])>=10) and (len(row[1])<=24):\r\n if 'unresolved' not in row[5]:\r\n if (row[1][0]=='C') and (row[1][-1]=='F'):\r\n ddnew.append(row)\r\n ddnew=np.array(ddnew) \r\n sorted_array = ddnew[ddnew[:,3].astype(float).argsort()] \r\n reverse_array = sorted_array[::-1]\r\n if len(reverse_array)>thr:\r\n col1=reverse_array[0:thr,1]\r\n col2=reverse_array[0:thr,5]\r\n col3=reverse_array[0:thr,3]\r\n else:\r\n col1=reverse_array[:,1]\r\n col2=reverse_array[:,5]\r\n col3=reverse_array[:,3]\r\n c=zip(col1,col2,col3)\r\n first_row='amino_acid\tv_gene\tfrequency'\r\n f=open(newff, 'w')\r\n f.write(first_row)\r\n f.write('\\n')\r\n f.close()\r\n with open(newff, 'w') as f:\r\n writer = csv.writer(f, delimiter='\\t')\r\n writer.writerows(c)\r\n"
] |
[
[
"numpy.array"
]
] |
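
The DeepCAT preparation script sorts the filtered rows by the frequency column in descending order and keeps the most frequent clones. The sketch below shows that numpy sort-and-slice step with made-up CDR3 rows; the column layout here is illustrative, not the Adaptive .tsv layout.

```python
import numpy as np

rows = np.array([
    ["CASSLGF", "TRBV5-1", "0.01"],
    ["CASSQETQYF", "TRBV6-5", "0.20"],
    ["CASRDNEQFF", "TRBV28", "0.05"],
])

# Sort ascending on the frequency column, then reverse for descending order.
sorted_rows = rows[rows[:, 2].astype(float).argsort()][::-1]
top = sorted_rows[:2]                       # keep the most frequent clones
print(top[:, 0], top[:, 2])
```
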
Hugo-L3174/talos-torque-control
|
[
"14faafcc06c93b57c972e92c1684b006667ff32e"
] |
[
"python/dynamic_graph/sot/torque_control/talos/motors_parameters_symmetric_id.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 9 13:55:16 2015\n\n@author: adelpret\n\"\"\"\nfrom numpy import zeros as zeros\n\nNJ = 32;\nk_tau = zeros(NJ);\nk_v = zeros(NJ);\n\n# PARAMETERS OF R_hip_y JOINT 0\nk_v[0] = 0.017585\nk_tau[0] = 0.000355\n# PARAMETERS OF R_hip_r JOINT 1\nk_v[1] = 0.006573\nk_tau[1] = 0.000036\n# PARAMETERS OF R_hip_p JOINT 2\nk_v[2] = 0.008817\nk_tau[2] = 0.000109\n# PARAMETERS OF R_knee JOINT 3\nk_v[3] = 0.006774\nk_tau[3] = 0.000060\n# PARAMETERS OF R_ankle pitch JOINT 4\nk_v[4] = 0.008107\nk_tau[4] = 0.000226\n# PARAMETERS OF R_ankle roll JOINT 5\nk_v[5] = 0.007444\nk_tau[5] = 0.000273\n"
] |
[
[
"numpy.zeros"
]
] |
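
The motor-parameter file above only lists the identified right-leg gains. Purely as an illustration of how "symmetric" identification results could be mirrored to the opposite leg, here is a hypothetical sketch; the left-leg joint indices (6..11) and the mirroring itself are assumptions for this example, not taken from the file.

```python
import numpy as np

NJ = 32
k_tau = np.zeros(NJ)
k_v = np.zeros(NJ)
k_v[0], k_tau[0] = 0.017585, 0.000355      # R_hip_y, from the file above
k_v[1], k_tau[1] = 0.006573, 0.000036      # R_hip_r, from the file above

RIGHT_LEG = range(0, 6)
LEFT_LEG = range(6, 12)                    # assumed mirrored joint slots
for r, l in zip(RIGHT_LEG, LEFT_LEG):
    k_v[l], k_tau[l] = k_v[r], k_tau[r]

print(k_v[:12])
```
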
JackFram/Neural-Flow
|
[
"83cea7aa933fa9650b42271ba4205208814d047b"
] |
[
"run_translation_czy.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for sequence to sequence.\n\"\"\"\n# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.\nfrom unittest import result\nfrom solver import OneShotHessianSolver\nfrom opt import BertQuantizeOp, SPruningOp, PruningOp, LowRankOp\n# from misc.translation import get_translation_FIM\n\nimport logging\nimport os\nimport sys\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport datasets\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom datasets import load_dataset, load_metric\nfrom utils import get_score\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n DataCollatorForSeq2Seq,\n HfArgumentParser,\n M2M100Tokenizer,\n MBart50Tokenizer,\n MBart50TokenizerFast,\n MBartTokenizer,\n MBartTokenizerFast,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.trainer_utils import get_last_checkpoint\nfrom transformers.utils import check_min_version\nfrom transformers.utils.versions import require_version\n\n\n\n\n# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\ncheck_min_version(\"4.15.0.dev0\")\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/translation/requirements.txt\")\n\nlogger = logging.getLogger(__name__)\n\n# A list of all multilingual tokenizer which require src_lang and tgt_lang attributes.\nMULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n default=\"Helsinki-NLP/opus-mt-en-ro\",\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n source_lang: str = field(default=\"en\", metadata={\"help\": \"Source language id for translation.\"})\n target_lang: str = field(default=\"ro\", metadata={\"help\": \"Target language id for translation.\"})\n\n dataset_name: Optional[str] = field(\n default=\"wmt16\", metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=\"ro-en\", metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a jsonlines).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input evaluation data file to evaluate the metrics (sacreblue) on \"\n \"a jsonlines file.\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to evaluate the metrics (sacreblue) on \" \"a jsonlines file.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_source_length: Optional[int] = field(\n default=512,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n max_target_length: Optional[int] = field(\n default=128,\n metadata={\n \"help\": \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n val_max_target_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.\"\n \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\n \"during ``evaluate`` and ``predict``.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n },\n )\n num_beams: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, \"\n \"which is used during ``evaluate`` and ``predict``.\"\n },\n )\n ignore_pad_token_for_loss: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to ignore the tokens corresponding to padded labels in the loss computation or not.\"\n },\n )\n source_prefix: Optional[str] = field(\n default=None, metadata={\"help\": \"A prefix to add before every source text (useful for T5 models).\"}\n )\n forced_bos_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The token to force as the first generated token after the :obj:`decoder_start_token_id`.\"\n \"Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token \"\n \"needs to be the target language token.(Usually it is the target language token)\"\n },\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n elif self.source_lang is None or self.target_lang is None:\n raise ValueError(\"Need to specify the source language and the target language.\")\n\n # accepting both json and jsonl file extensions, as\n # many jsonlines files actually have a .json extension\n valid_extensions = [\"json\", \"jsonl\"]\n\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in valid_extensions, \"`train_file` should be a jsonlines file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in valid_extensions, \"`validation_file` should be a jsonlines file.\"\n if self.val_max_target_length is None:\n self.val_max_target_length = self.max_target_length\n\n\ndef train_wmt16(solver, model, raw_datasets, model_args, data_args, training_args, mode=\"train\"):\n configs = solver.configs\n tokenizer = solver.tokenizer\n logger = solver.logger\n\n if mode == \"train\":\n training_args.do_eval = False\n training_args.do_train = True\n elif mode == \"eval\":\n training_args.do_train = False\n training_args.do_eval = True\n\n # Set decoder_start_token_id\n if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):\n if isinstance(tokenizer, MBartTokenizer):\n model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]\n else:\n model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)\n\n if model.config.decoder_start_token_id is None:\n raise ValueError(\"Make sure that `config.decoder_start_token_id` is correctly defined\")\n\n prefix = data_args.source_prefix if data_args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # We need to tokenize inputs and targets.\n if training_args.do_train:\n column_names = raw_datasets[\"train\"].column_names\n elif training_args.do_eval:\n column_names = raw_datasets[\"validation\"].column_names\n elif training_args.do_predict:\n column_names = raw_datasets[\"test\"].column_names\n else:\n logger.info(\"There is nothing to do. 
Please pass `do_train`, `do_eval` and/or `do_predict`.\")\n return\n\n # For translation we set the codes of our source and target languages (only useful for mBART, the others will\n # ignore those attributes).\n if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):\n assert data_args.target_lang is not None and data_args.source_lang is not None, (\n f\"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and \"\n \"--target_lang arguments.\"\n )\n\n tokenizer.src_lang = data_args.source_lang\n tokenizer.tgt_lang = data_args.target_lang\n\n # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token\n # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument.\n forced_bos_token_id = (\n tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None\n )\n model.config.forced_bos_token_id = forced_bos_token_id\n\n # Get the language codes for input/target.\n source_lang = data_args.source_lang.split(\"_\")[0]\n target_lang = data_args.target_lang.split(\"_\")[0]\n\n # Temporarily set max_target_length for training.\n max_target_length = data_args.max_target_length\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n if training_args.label_smoothing_factor > 0 and not hasattr(model, \"prepare_decoder_input_ids_from_labels\"):\n logger.warning(\n \"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for\"\n f\"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory\"\n )\n\n def preprocess_function(examples):\n inputs = [ex[source_lang] for ex in examples[\"translation\"]]\n targets = [ex[target_lang] for ex in examples[\"translation\"]]\n inputs = [prefix + inp for inp in inputs]\n model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)\n\n # Setup the tokenizer for targets\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\" and data_args.ignore_pad_token_for_loss:\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on train dataset\",\n )\n\n if training_args.do_eval:\n max_target_length = data_args.val_max_target_length\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n eval_dataset = 
eval_dataset.select(range(data_args.max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on validation dataset\",\n )\n\n if training_args.do_predict:\n max_target_length = data_args.val_max_target_length\n if \"test\" not in raw_datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_dataset = raw_datasets[\"test\"]\n if data_args.max_predict_samples is not None:\n predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))\n with training_args.main_process_first(desc=\"prediction dataset map pre-processing\"):\n predict_dataset = predict_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on prediction dataset\",\n )\n\n # Data collator\n label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n if data_args.pad_to_max_length:\n data_collator = default_data_collator\n else:\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=8 if training_args.fp16 else None,\n )\n\n # Metric\n metric = load_metric(\"sacrebleu\")\n\n def postprocess_text(preds, labels):\n preds = [pred.strip() for pred in preds]\n labels = [[label.strip()] for label in labels]\n\n return preds, labels\n\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels)\n result = {\"bleu\": result[\"score\"]}\n\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n result = {k: round(v, 4) for k, v in result.items()}\n return result\n\n # Initialize our Trainer\n\n # op = PruningOp(model, amount=0.4)\n # model, diff, storage_save = op.apply(name_list=op.operatable[:3], verbose=False, with_profile=True)\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n )\n\n # Training\n if training_args.do_train:\n checkpoint = None\n # if training_args.resume_from_checkpoint is not None:\n # checkpoint = training_args.resume_from_checkpoint\n # elif last_checkpoint is not None:\n # checkpoint = last_checkpoint\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n metrics = train_result.metrics\n max_train_samples = (\n 
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n results = {}\n max_length = (\n training_args.generation_max_length\n if training_args.generation_max_length is not None\n else data_args.val_max_target_length\n )\n num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix=\"eval\")\n max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n results[\"eval_loss\"] = metrics[\"eval_loss\"]\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n predict_results = trainer.predict(\n predict_dataset, metric_key_prefix=\"predict\", max_length=max_length, num_beams=num_beams\n )\n metrics = predict_results.metrics\n max_predict_samples = (\n data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)\n )\n metrics[\"predict_samples\"] = min(max_predict_samples, len(predict_dataset))\n\n trainer.log_metrics(\"predict\", metrics)\n trainer.save_metrics(\"predict\", metrics)\n\n if trainer.is_world_process_zero():\n if training_args.predict_with_generate:\n predictions = tokenizer.batch_decode(\n predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True\n )\n predictions = [pred.strip() for pred in predictions]\n output_prediction_file = os.path.join(training_args.output_dir, \"generated_predictions.txt\")\n with open(output_prediction_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"\\n\".join(predictions))\n\n kwargs = {\"finetuned_from\": model_args.model_name_or_path, \"tasks\": \"translation\"}\n if data_args.dataset_name is not None:\n kwargs[\"dataset_tags\"] = data_args.dataset_name\n if data_args.dataset_config_name is not None:\n kwargs[\"dataset_args\"] = data_args.dataset_config_name\n kwargs[\"dataset\"] = f\"{data_args.dataset_name} {data_args.dataset_config_name}\"\n else:\n kwargs[\"dataset\"] = data_args.dataset_name\n\n languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]\n if len(languages) > 0:\n kwargs[\"language\"] = languages\n\n if training_args.push_to_hub:\n trainer.push_to_hub(**kwargs)\n else:\n trainer.create_model_card(**kwargs)\n\n return results\n\n\ndef evaluate_wmt_solver(solver, get_solution_func, model_orig, raw_datasets, model_args, data_args, training_args, **kwargs):\n loss = []\n for storage_thresold in np.arange(solver.model_size, 0, -solver.model_size/20):\n\n model = copy.deepcopy(model_orig)\n\n print(\"Getting results for storage threshold {}\".format(storage_thresold))\n if 'methods' in kwargs:\n solution = get_solution_func(storage_thresold, methods=kwargs['methods'])\n else:\n solution = get_solution_func(storage_thresold)\n # print(f\"solution: {solution}\")\n if solution is not None:\n quantize_list = []\n for layer in solution:\n for name in layer.split(\"+\"):\n layer_name, op_name, attrs = name.split(\"@\")\n if op_name == \"upruning\":\n op = 
PruningOp(model)\n model = op.apply([layer_name], amount=float(attrs))\n elif op_name == \"quantize\" and attrs != \"none\":\n quantize_list.append(layer_name)\n elif op_name == \"lowrank\":\n op = LowRankOp(model)\n model = op.apply([layer_name], rank_fraction=(float(attrs)))\n elif op_name == \"spruning\":\n op = SPruningOp(model)\n model = op.apply([layer_name], amount=float(attrs))\n # os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n model.to(\"cuda\")\n # configs.device = \"gpu\"\n training_args.save_strategy = \"no\"\n training_args.max_steps = 3000\n training_args.no_cuda = False\n results = train_wmt16(\n solver=solver,\n model=model,\n raw_datasets=raw_datasets,\n model_args=model_args,\n data_args=data_args,\n training_args=training_args,\n mode=\"train\"\n )\n # model.eval()\n # model.to(\"cpu\")\n # configs.device = \"cpu\"\n if len(quantize_list) > 0:\n # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n training_args.no_cuda = True\n model.to(\"cpu\")\n op = BertQuantizeOp(model)\n op.set_config()\n mod_model = op.apply(name_list=quantize_list, verbose=False)\n else:\n mod_model = model\n # training_args.no_cuda = True\n results = train_wmt16(\n solver=solver,\n model=mod_model,\n raw_datasets=raw_datasets,\n model_args=model_args,\n data_args=data_args,\n training_args=training_args,\n mode=\"eval\"\n )\n loss.append(results[\"eval_loss\"])\n return loss\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Preset training args\n # training_args.output_dir = \"tst-translation\"\n\n training_args.per_device_train_batch_size = 4\n training_args.per_device_eval_batch_size = 4\n training_args.save_total_limit = 10\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n if data_args.source_prefix is None and model_args.model_name_or_path in [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n ]:\n logger.warning(\n \"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. 
with \"\n \"`--source_prefix 'translate English to German: ' `\"\n )\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own JSON training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For translation, only JSON files are supported, with one field named \"translation\" containing two keys for the\n # source and target languages (unless you adapt what follows).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(\n data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir\n )\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n extension = data_args.train_file.split(\".\")[-1]\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.validation_file.split(\".\")[-1]\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.test_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # tokenizer = AutoTokenizer.from_pretrained(\n # model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n # cache_dir=model_args.cache_dir,\n # use_fast=model_args.use_fast_tokenizer,\n # revision=model_args.model_revision,\n # use_auth_token=True if model_args.use_auth_token else None,\n # )\n # model = AutoModelForSeq2SeqLM.from_pretrained(\n # model_args.model_name_or_path,\n # from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n # config=config,\n # cache_dir=model_args.cache_dir,\n # revision=model_args.model_revision,\n # use_auth_token=True if model_args.use_auth_token 
else None,\n # )\n\n tokenizer = AutoTokenizer.from_pretrained(\n \"t5-small—translation/checkpoint-457500\"\n )\n\n model = AutoModelForSeq2SeqLM.from_pretrained(\n \"t5-small—translation/checkpoint-457500\"\n )\n\n model.resize_token_embeddings(len(tokenizer))\n\n # FIM = get_translation_FIM(None, model, tokenizer, op.operatable[0], logger)\n Ops = [BertQuantizeOp, PruningOp, LowRankOp, SPruningOp]\n hession_solver = OneShotHessianSolver(model.eval(), Ops, config, tokenizer, logger, task_name=\"t5-small-wmt16-czy-full\")\n # loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_zzh_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args\n # )\n # lq_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_filtered_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args,\n # methods={'quantize', 'lowrank'}\n # )\n \n # lq_loss = [1.429, 1.4328, 1.4328, 1.4328, 1.4328, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.5205, 1.7743, 1.9957, 2.7768, 3.8524, 5.9477]\n \n # l_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_filtered_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args,\n # methods={\"lowrank\"}\n # )\n\n # l_loss = [1.429, 1.5587, 1.5944, 1.6955, 1.7695, 1.8979, 2.0674, 2.2706, 2.547, 2.8398, 3.178, 3.6058, 3.6638, 4.1246, 4.7156, 5.3854, 5.8748, 6.1862, 6.3674]\n\n # d_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_filtered_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args,\n # methods={\"spruning\"}\n # )\n # pq_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_filtered_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args,\n # methods={\"upruning\", \"quantize\"}\n # )\n pqdl_loss = evaluate_wmt_solver(\n solver=hession_solver,\n get_solution_func=hession_solver.get_filtered_solution,\n model_orig=model,\n raw_datasets=raw_datasets,\n model_args=model_args,\n data_args=data_args,\n training_args=training_args,\n methods={\"spruning\", \"upruning\", \"quantize\", \"lowrank\"}\n )\n # q_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_quantize_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args\n # )\n # p_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_pruning_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args\n # )\n # r_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # get_solution_func=hession_solver.get_random_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args\n # )\n # s_loss = evaluate_wmt_solver(\n # solver=hession_solver,\n # 
get_solution_func=hession_solver.get_max_storage_solution,\n # model_orig=model,\n # raw_datasets=raw_datasets,\n # model_args=model_args,\n # data_args=data_args,\n # training_args=training_args\n # )\n\n # data = np.load(\"./results/data/t5-small-wmt16_all.npz\")\n # q_loss = data[\"q_loss\"]\n # p_loss = data[\"p_loss\"]\n # pq_loss = data[\"pq_loss\"]\n # r_loss = data[\"r_loss\"]\n # s_loss = data[\"s_loss\"]\n # lq_loss = data[\"lq_loss\"]\n # l_loss = data[\"l_loss\"]\n # d_loss = data[\"d_loss\"]\n np.savez(\"./results/data/t5-small-wmt16_all.npz\", q_loss=q_loss, p_loss=p_loss, pqdl_loss=pqdl_loss, r_loss=r_loss, s_loss=s_loss, lq_loss=lq_loss, l_loss=l_loss, d_loss=d_loss, pq_loss=pq_loss)\n\n quant_range = np.arange(hession_solver.model_size, 0, -hession_solver.model_size / 20)[:len(q_loss)]\n l_range = np.arange(hession_solver.model_size, 0, -hession_solver.model_size / 20)[:len(l_loss)]\n oshs_range = np.arange(hession_solver.model_size, 0, -hession_solver.model_size / 20)\n plt.plot(quant_range, q_loss, label=\"q_loss\")\n plt.plot(oshs_range, p_loss, label=\"p_loss\")\n plt.plot(oshs_range, lq_loss, label=\"lq_loss\")\n plt.plot(oshs_range, pq_loss, label=\"pq_loss\")\n plt.plot(oshs_range, pqdl_loss, label=\"pqdl_loss\")\n plt.plot(l_range, l_loss, label=\"l_loss\")\n plt.plot(oshs_range, d_loss, label=\"d_loss\")\n plt.plot(oshs_range, r_loss, label=\"random_loss\")\n plt.plot(oshs_range, s_loss, label=\"max_storage_loss\")\n plt.legend()\n\n plt.savefig(\"./results/t5-small-wmt16-all.pdf\", bbox_inches=\"tight\", dpi=500)\n\n # print(f\"Score: oshs_loss:{get_score(loss)}, q_loss:{get_score(q_loss)}, p_loss:{get_score(p_loss)}, r_loss:{get_score(r_loss)}, s_loss:{get_score(s_loss)}.\")\n # print(f\"Score: oshs_loss:{get_score(loss)}.\")\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"numpy.savez",
"numpy.arange",
"numpy.mean",
"numpy.count_nonzero",
"matplotlib.pylab.plot",
"matplotlib.pylab.legend",
"matplotlib.pylab.savefig",
"numpy.where"
]
] |
TaroSunagawa/oculomotor
|
[
"99ea16b2fcc5e99c833b506421337b6c36fbc0cd"
] |
[
"application/agent/__init__.py"
] |
[
"import numpy as np\nimport brica\n\n# Limit of horizontal and vertical angles at one environment step.\nACTION_RATE = 0.02\n\n\nclass Environment:\n def __init__(self):\n self._image = None\n self._angle = (0.0, 0.0)\n # add phase\n #self._phase = 'True'\n self._reward = 0.0\n self._done = False\n self._action = None\n self.timing = brica.Timing(0, 1, 0)\n\n def __call__(self, inputs):\n # Action from SC module\n sc_action = inputs['from_sc']\n\n # Action from Cerebellum module\n cb_action = inputs['from_cb']\n \n if sc_action is not None:\n # If there is an action from SC module (supposed to be a succade eye motion),\n # choose it.\n self._action = np.clip(sc_action, -1.0, 1.0) * ACTION_RATE\n elif cb_action is not None:\n # If there is no action from SC module, choose action from Cerebellum module\n # (supposed to be a smooth pursuit eye motion)\n self._action = np.clip(cb_action, -1.0, 1.0) * ACTION_RATE\n\n # Final action values (horizontal and vertical relative angles) are between\n # -ACTION_RATE ~ ACTION_RATE.\n '''\n return dict(to_retina=(self._image, self._angle),\n to_bg=(self._reward, self._done))\n '''\n '''\n return dict(to_retina=(self._image, self._angle, self._phase),\n to_bg=(self._reward, self._done))\n '''\n return dict(to_retina=(self._image, self._angle),\n to_bg=(self._reward, self._done))\n\n # add phase\n def set(self, image, angle, reward, done):\n #def set(self, image, angle, phase, reward, done):\n self._image = image\n self._angle = angle\n # add phase\n #self._phase = phase\n self._reward = reward\n self._done = done\n\n @property\n def action(self):\n if self._action is None:\n return np.array([0.0, 0.0], dtype=np.float32)\n return self._action\n\n\nclass Agent(object):\n connections = [\n ('environment', 'retina'), # offset = 0\n ('environment', 'bg'),\n ('retina', 'lip'), # offset=1\n ('retina', 'vc'),\n ('retina', 'hp'),\n ('lip', 'fef'), # offset=2\n ('vc', 'pfc'), # offset=2\n ('hp', 'pfc'), # offset=2\n ('vc', 'fef'),\n ('pfc', 'fef'), # offset=3\n ('pfc', 'bg'),\n ('fef', 'pfc'), # offset=4\n ('fef', 'sc'),\n ('fef', 'bg'),\n ('fef', 'cb'),\n ('bg', 'pfc'), # offset=5\n ('bg', 'fef'),\n ('bg', 'sc'),\n ('cb', 'environment'), # offset=5\n ('sc', 'environment'), # offset=6\n ]\n\n def __init__(self, retina, lip, vc, pfc, fef, bg, sc, hp, cb):\n self.components = {}\n self.scheduler = brica.VirtualTimeScheduler()\n self.environment = Environment()\n self.setup(\n environment=self.environment,\n retina=retina,\n lip=lip,\n vc=vc,\n pfc=pfc,\n fef=fef,\n bg=bg,\n sc=sc,\n hp=hp,\n cb=cb\n )\n\n def setup(self, **functions):\n for key, function in functions.items():\n self.components[key] = brica.Component(function)\n self.scheduler.add_component(self.components[key], function.timing)\n\n for origin_name, target_name in self.connections:\n in_port = 'from_{}'.format(origin_name)\n out_port = 'to_{}'.format(target_name)\n\n self.components[origin_name].make_out_port(out_port)\n self.components[target_name].make_in_port(in_port)\n\n brica.connect(self.components[origin_name], out_port,\n self.components[target_name], in_port)\n\n #def __call__(self, image, angle, phase, reward, done):\n def __call__(self, image, angle, reward, done):\n # add phase\n self.environment.set(image, angle, reward, done)\n # self.environment.set(image, angle, phase, reward, done)\n self.scheduler.step()\n return self.environment.action\n"
] |
[
[
"numpy.array",
"numpy.clip"
]
] |
guidoAI/spiking_notebook
|
[
"58612097efa2184de3758e314b5cf38f155e4cac"
] |
[
"spiking.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 7 14:36:53 2021\n\n@author: Jesse Hagenaars\nAdapted by Guido for the AE4350 course\n\"\"\"\n\n# https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html\n\nfrom typing import Optional, NamedTuple, Tuple\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n# %matplotlib notebook\n\n\ndef spike_function(v: torch.Tensor) -> torch.Tensor:\n return v.gt(0.0).float() # gt means greater than, which works on tensors\n\n# Placeholder for LIF state\nclass LIFState(NamedTuple):\n z: torch.Tensor\n v: torch.Tensor\n i: torch.Tensor\n\n\n# Placeholder for LIF parameters\nclass LIFParameters(NamedTuple):\n i_decay: torch.Tensor = torch.as_tensor(0.0)\n v_decay: torch.Tensor = torch.as_tensor(0.75)\n thresh: torch.Tensor = torch.as_tensor(0.5)\n\n\n# Actual LIF function\ndef lif_neuron(\n i: torch.Tensor,\n state: Optional[LIFState] = None,\n p: LIFParameters = LIFParameters(),\n) -> Tuple[torch.Tensor, LIFState]:\n # Previous state\n if state is None:\n state = LIFState(\n z=torch.zeros_like(i),\n v=torch.zeros_like(i),\n i=torch.zeros_like(i),\n )\n # Update state\n i = state.i * p.i_decay + i\n v = state.v * p.v_decay * (1.0 - state.z) + i\n z = spike_function(v - p.thresh)\n return z, LIFState(z, v, i)\n\n\n# Placeholder for ALIF state\n# Change wrt LIF: threshold is also a state!\nclass ALIFState(NamedTuple):\n z: torch.Tensor\n v: torch.Tensor\n i: torch.Tensor\n t: torch.Tensor\n\n\n# Placeholder for ALIF parameters\n# Change wrt LIF: reset/start value for threshold, decay and addition constants\nclass ALIFParameters(NamedTuple):\n i_decay: torch.Tensor = torch.as_tensor(0.0)\n v_decay: torch.Tensor = torch.as_tensor(0.75)\n t_decay: torch.Tensor = torch.as_tensor(0.95)\n t_add: torch.Tensor = torch.as_tensor(1.05)\n t_0: torch.Tensor = torch.as_tensor(0.5)\n\n\n# Actual ALIF function\n# Change wrt LIF: additional equation for threshold adaptation\ndef alif_neuron(\n i: torch.Tensor,\n state: Optional[ALIFState] = None,\n p: ALIFParameters = ALIFParameters(),\n) -> Tuple[torch.Tensor, ALIFState]:\n # Previous state\n if state is None:\n state = ALIFState(\n z=torch.zeros_like(i),\n v=torch.zeros_like(i),\n i=torch.zeros_like(i),\n t=torch.ones_like(i) * p.t_0,\n )\n # Update state\n i = state.i * p.i_decay + i\n v = state.v * p.v_decay * (1.0 - state.z) + i\n z = spike_function(v - state.t)\n t = state.t * p.t_decay + p.t_add * z\n return z, ALIFState(z, v, i, t)\n\n\n# Incoming current: spikes of previous layer * weights\n# Let's assume weights of 0.3 and one incoming connection\nsteps = 100\ni = torch.randint(5, (steps,)).eq(2).float() * 0.3\n\ncurrents = []\nstates = []\nstate = None\nfor step in range(steps):\n _, state = lif_neuron(i[step], state)\n currents.append(i[step].item()) # .item() to convert tensor to float (for 1-element tensor)\n states.append([state.z.item(), state.v.item(), state.i.item()])\n \nplt.plot(currents, label=\"incoming current\")\nplt.plot(np.array(states)[:, 0], label=\"neuron spikes\")\nplt.plot(np.array(states)[:, 1], label=\"neuron voltage\")\nplt.plot(np.array(states)[:, 2], label=\"neuron current\")\nplt.grid()\nplt.legend()\nplt.show()\n\n\n## Plot spike rate as a function of input strength\n#steps = 1001 # input values\n#duration = 5 # how long to stimulate\n## By putting 'steps' in the batch dimension, we can can evaluate all steps in parallel\n## It's as if we have 'steps' neurons with the same parameters!\n#inp = torch.linspace(0, 10, steps) # no .view(-1, 1) 
necessary\n#\n## Log responses in a tensor\n#log_lif = torch.zeros(steps, duration)\n#log_alif = torch.zeros(steps, duration)\n#state_lif, state_alif = None, None\n## Note that we can't parallelize the 'duration' dimension, because the next neuron state depends on the previous\n#for d in range(duration):\n# # Only log spikes\n# log_lif[:, d], state_lif = lif_neuron(inp, state_lif)\n# log_alif[:, d], state_alif = alif_neuron(inp, state_alif)\n# \n## Now average over 'duration' to get the avg spike rate\n## And plot!\n#plt.plot(inp.view(-1).numpy(), log_lif.mean(-1).numpy(), label=\"LIF\")\n#plt.plot(inp.view(-1).numpy(), log_alif.mean(-1).numpy(), label=\"ALIF\")\n#plt.plot(inp.view(-1).numpy(), inp.view(-1).numpy(), label=\"Input\")\n#plt.grid()\n#plt.legend()\n#plt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"torch.randint",
"torch.zeros_like",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"numpy.array",
"matplotlib.pyplot.show",
"torch.ones_like",
"torch.as_tensor"
]
] |
JiYuanFeng/mmclassification
|
[
"b337ef1f11b85148cca4b6fb0c4da3f8cc2eede6"
] |
[
"tests/test_models/test_neck.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmcls.models.necks import GlobalAveragePooling, HRFuseScales\n\n\ndef test_gap_neck():\n\n # test 1d gap_neck\n neck = GlobalAveragePooling(dim=1)\n # batch_size, num_features, feature_size\n fake_input = torch.rand(1, 16, 24)\n\n output = neck(fake_input)\n # batch_size, num_features\n assert output.shape == (1, 16)\n\n # test 1d gap_neck\n neck = GlobalAveragePooling(dim=2)\n # batch_size, num_features, feature_size(2)\n fake_input = torch.rand(1, 16, 24, 24)\n\n output = neck(fake_input)\n # batch_size, num_features\n assert output.shape == (1, 16)\n\n # test 1d gap_neck\n neck = GlobalAveragePooling(dim=3)\n # batch_size, num_features, feature_size(3)\n fake_input = torch.rand(1, 16, 24, 24, 5)\n\n output = neck(fake_input)\n # batch_size, num_features\n assert output.shape == (1, 16)\n\n with pytest.raises(AssertionError):\n # dim must in [1, 2, 3]\n GlobalAveragePooling(dim='other')\n\n\ndef test_hr_fuse_scales():\n\n in_channels = (18, 32, 64, 128)\n neck = HRFuseScales(in_channels=in_channels, out_channels=1024)\n\n feat_size = 56\n inputs = []\n for in_channel in in_channels:\n input_tensor = torch.rand(3, in_channel, feat_size, feat_size)\n inputs.append(input_tensor)\n feat_size = feat_size // 2\n\n with pytest.raises(AssertionError):\n neck(inputs)\n\n outs = neck(tuple(inputs))\n assert isinstance(outs, tuple)\n assert len(outs) == 1\n assert outs[0].shape == (3, 1024, 7, 7)\n"
] |
[
[
"torch.rand"
]
] |
vivid43/PRML-ZW
|
[
"be465e616785109f05290faf20fb4df93ee06092"
] |
[
"SoftmaxRegression/softmax_regression.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nclass SoftMax():\n\tdef __init__(self,alpha = 1.0,maxIteration = 1000):\n\t\tself.maxIteration = int(maxIteration)\n\t\tself.alpha=float(alpha)\n\n\tdef fit(self,X_,y):\n\t\tX = np.hstack((np.ones((X_.shape[0],1)),X_))\n\t\tm,n = X.shape\n\t\tk=y.shape[1]\n\t\tself.theta = np.ones((n,k))\n\t\tfor i in range(self.maxIteration):\n\t\t\ttheta_prev = np.copy(self.theta)\n\t\t\tDtheta = -X.T@(y-self.softmax(X,self.theta))/m\n\t\t\tself.theta -= self.alpha*Dtheta\n\t\t\tif np.allclose(self.theta, theta_prev):\n\t\t\t\tbreak\n\t\tacc = self.acc(self.predict(X),np.argmax(y,axis=1))\n\t\tprint(\"Iteration:{}\".format(i))\n\t\tprint('train_accuracy:{}'.format(acc))\n\n\n\tdef softmax(self,X,theta):\n\t\ttemp = X @ theta\n\t\treturn temp/np.sum(temp,axis=1).reshape(-1,1)\n\n\n\tdef predict(self,X_):\n\t\tX = np.hstack((np.ones((X_.shape[0],1)),X_))\n\t\treturn np.argmax(self.softmax(X,self.theta),axis=1).reshape(-1,1)\n\tdef acc(self,y,y_pred):\n\t\treturn (1-np.count_nounzero(y-y_pred))/y.shape[0]\n"
] |
[
[
"numpy.allclose",
"numpy.count_nounzero",
"numpy.ones",
"numpy.copy",
"numpy.argmax",
"numpy.sum"
]
] |
aalto-ui/chi21adaptive
|
[
"83d75f0c9dee7002e1d38f21108fef52d7be1b85"
] |
[
"value_network/train.py"
] |
[
"#!/usr/bin/env python3\n# coding: utf-8\n\nimport sys\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport numpy as np\nimport tensorflow as tf\n\n\n# Boost GPU usage.\nconf = tf.compat.v1.ConfigProto()\nconf.gpu_options.allow_growth = True\ntf.compat.v1.Session(config=conf)\n\n# Maximum number of items in a given menu, including separators.\nMAX_MENU_ITEMS = 20\n# Size of the one-hot encoded vectors. This value should be large enough to avoid hashing collisions.\nENC_VOCAB_SIZE = 90\n\n\ndef load_data(filepath):\n X1, X2, X3, X4 = [], [], [], []\n y1, y2, y3 = [], [], []\n\n with open(filepath) as f:\n for line in f.read().splitlines():\n (serial, forage, recall), (target_menu, diff_freq, diff_asso), exposed = format_row(line)\n\n X1.append(target_menu)\n X2.append(diff_freq)\n X3.append(diff_asso)\n X4.append(exposed)\n\n y1.append(serial)\n y2.append(forage)\n y3.append(recall)\n\n return (np.array(X1), np.array(X2), np.array(X3), np.array(X4)), (np.array(y1), np.array(y2), np.array(y3))\n\n\ndef format_row(line):\n (serial, forage, recall), (source_menu, source_freq, source_asso), (target_menu, target_freq, target_asso), exposed = parse_row(line)\n\n adap_menu, diff_freq, diff_asso = parse_user_input(source_menu, source_freq, source_asso, target_menu, target_freq, target_asso)\n\n return (serial, forage, recall), (adap_menu, diff_freq, diff_asso), exposed\n\n\ndef parse_row(line):\n # Row format is \"[serial,forage,recall][source_menu][source_freq][source_asso][target_menu][target_freq][target_asso][exposed]\"\n tokens = line[1:-1].split('][')\n n_toks = len(tokens)\n assert n_toks == 8, 'There are {} tokens, but I expected 8'.format(n_toks)\n\n # FIXME: We should agree on a parser-friendly row format.\n serial, forage, recall = list(map(float, tokens[0].split(', ')))\n\n source_menu = list(map(lambda x: x.replace(\"'\", ''), tokens[1].split(', ')))\n source_freq = list(map(float, tokens[2].split(', ')))\n source_asso = list(map(float, tokens[3].split(', ')))\n\n target_menu = list(map(lambda x: x.replace(\"'\", ''), tokens[4].split(', ')))\n target_freq = list(map(float, tokens[5].split(', ')))\n target_asso = list(map(float, tokens[6].split(', ')))\n\n # Currently there's only one extra feat, but wrap it as list in case we add more feats in the future.\n exposed = [bool(tokens[7])]\n\n return (serial, forage, recall), (source_menu, source_freq, source_asso), (target_menu, target_freq, target_asso), exposed\n\n\ndef parse_user_input(source_menu, source_freq, source_asso, target_menu, target_freq, target_asso):\n # Encode adapted menu as integers and compute the difference between previous and current menu configuration.\n adap_menu = onehot_menu(target_menu)\n # Adjust remaining menu items with zeros (reserved value) at the end.\n adap_menu = adj(adap_menu, value=[0])\n\n# # Experimental: ignore differences w.r.t source menu.\n# num_cols = len(target_freq)\n# tgt_asso = np.array(target_asso).reshape((num_cols, num_cols))\n# tgt_asso = adj([adj(item) for item in tgt_asso], [0]*MAX_MENU_ITEMS)\n# tgt_asso = tgt_asso.reshape((MAX_MENU_ITEMS*MAX_MENU_ITEMS,))\n# return adap_menu, adj(target_freq), tgt_asso\n\n # Ensure that all vectors have the same length.\n max_freq_len = max(len(source_freq), len(target_freq))\n max_asso_len = max(len(source_asso), len(target_asso))\n source_freq = pad(source_freq, max_freq_len)\n target_freq = pad(target_freq, max_freq_len)\n source_asso = pad(source_asso, max_asso_len)\n target_asso = pad(target_asso, max_asso_len)\n\n diff_freq = 
np.diff([source_freq, target_freq], axis=0).flatten()\n diff_asso = np.diff([source_asso, target_asso], axis=0).flatten()\n\n # Ensure there is a change in freq distribution, otherwise `diff_freq` would be always zero.\n if np.array_equal(source_freq, target_freq):\n diff_freq = source_freq\n\n # The association matrix list is given as a flat vector, so reshape it before padding.\n # Notice that we read the number of items BEFORE padding `diff_freq`.\n num_rows = len(diff_freq)\n num_cols = len(diff_asso)//num_rows\n diff_asso = diff_asso.reshape((num_cols, num_rows))\n diff_asso = adj([adj(item) for item in diff_asso], [0]*MAX_MENU_ITEMS)\n diff_asso = diff_asso.reshape((MAX_MENU_ITEMS*MAX_MENU_ITEMS,))\n\n return adap_menu, adj(diff_freq), diff_asso\n\n\ndef pad(l, size, value=0):\n return l + [value] * abs((len(l)-size))\n\n\ndef adj(vec, value=0):\n N = len(vec)\n d = MAX_MENU_ITEMS - N\n if d < 0:\n # Truncate vector.\n vec = vec[:MAX_MENU_ITEMS]\n elif d > 0:\n # Pad vector with zeros (reserved value) at the *end* of the vector.\n vec = list(vec) + [value for _ in range(d)]\n return np.array(vec)\n\n\ndef onehot_menu(items):\n # FIXME: We should agree on a single-word menu separator, because '----' is conflicting with the built-in text parser.\n enc_menu = [tf.keras.preprocessing.text.one_hot(w, ENC_VOCAB_SIZE, filters='') for w in items]\n return enc_menu\n\n\ndef create_model(adap_menu, diff_freq, diff_asso, xtra_feat):\n # The provided sample args are needed to get the input shapes right.\n # For example, the network capacity is bounded by the (max) number of menu items.\n num_items = diff_freq.shape[0]\n\n def menu_head(inputs):\n m = tf.keras.layers.Embedding(ENC_VOCAB_SIZE, num_items, input_length=num_items)(inputs)\n m = tf.keras.layers.Flatten()(m)\n m = tf.keras.layers.Dropout(0.5)(m)\n m = tf.keras.layers.Dense(num_items//2)(m)\n m = tf.keras.Model(inputs=inputs, outputs=m)\n return m\n\n def freq_head(inputs):\n f = tf.keras.layers.Reshape((num_items, 1))(inputs)\n f = tf.keras.layers.LSTM(num_items, activation='relu')(f)\n f = tf.keras.layers.Dropout(0.5)(f)\n f = tf.keras.layers.Dense(num_items//2)(f)\n f = tf.keras.Model(inputs=inputs, outputs=f)\n return f\n\n def asso_head(inputs):\n a = tf.keras.layers.Reshape((num_items, num_items))(inputs)\n a = tf.keras.layers.LSTM(num_items*2, activation='relu')(a)\n a = tf.keras.layers.Dropout(0.5)(a)\n a = tf.keras.layers.Dense(num_items//2)(a)\n a = tf.keras.Model(inputs=inputs, outputs=a)\n return a\n\n def serial_tail(inputs):\n s = tf.keras.layers.Dense(num_items//2)(inputs)\n s = tf.keras.layers.Dropout(0.5)(s)\n s = tf.keras.layers.Dense(1)(s)\n s = tf.keras.layers.Activation('linear', name='serial_output')(s)\n return s\n\n def forage_tail(inputs):\n f = tf.keras.layers.Dense(num_items//2)(inputs)\n f = tf.keras.layers.Dropout(0.5)(f)\n f = tf.keras.layers.Dense(1)(f)\n f = tf.keras.layers.Activation('linear', name='forage_output')(f)\n return f\n\n def recall_tail(inputs):\n r = tf.keras.layers.Dense(num_items//2)(inputs)\n r = tf.keras.layers.Dropout(0.5)(r)\n r = tf.keras.layers.Dense(1)(r)\n r = tf.keras.layers.Activation('linear', name='recall_output')(r)\n return r\n\n input_menu = tf.keras.layers.Input(shape=adap_menu.shape, name='menu')\n input_freq = tf.keras.layers.Input(shape=diff_freq.shape, name='priors')\n input_asso = tf.keras.layers.Input(shape=diff_asso.shape, name='associations')\n input_feat = tf.keras.layers.Input(shape=xtra_feat.shape, name='features')\n\n menu = menu_head(input_menu)\n freq = 
freq_head(input_freq)\n asso = asso_head(input_asso)\n\n combined_head = tf.keras.layers.concatenate([menu.output, freq.output, asso.output, input_feat])\n serial = serial_tail(combined_head)\n forage = forage_tail(combined_head)\n recall = recall_tail(combined_head)\n\n # Hereby I compose the almighty value network model.\n model = tf.keras.Model(inputs=[menu.input, freq.input, asso.input, input_feat], outputs=[serial, forage, recall])\n losses = {'serial_output': 'mse', 'forage_output': 'mse', 'recall_output': 'mse'}\n model.compile(optimizer='rmsprop', loss=losses, metrics=['mse', 'mae'])\n\n return model\n\n\n\nif __name__ == '__main__':\n # Input can be either a list of files or a directory.\n train_inputs = sys.argv[1:]\n\n # Collect all training files first.\n tr_files = []\n for tr_input in train_inputs:\n if os.path.isdir(tr_input):\n for path, directories, files in os.walk(tr_input):\n for f in files:\n if f.endswith('.txt'):\n file_path = os.path.join(path, f)\n tr_files.append(file_path)\n\n elif os.path.isfile(tr_input):\n tr_files.append(tr_input)\n\n X1, X2, X3, X4 = [], [], [], []\n y1, y2, y3 = [], [], []\n\n for f in tr_files:\n (X1_, X2_, X3_, X4_), (y1_, y2_, y3_) = load_data(file_path)\n X1 = np.concatenate((X1, X1_)) if len(X1) > 0 else X1_\n X2 = np.concatenate((X2, X2_)) if len(X2) > 0 else X2_\n X3 = np.concatenate((X3, X3_)) if len(X3) > 0 else X3_\n X4 = np.concatenate((X4, X4_)) if len(X4) > 0 else X4_\n y1 = np.concatenate((y1, y1_)) if len(y1) > 0 else y1_\n y2 = np.concatenate((y2, y2_)) if len(y2) > 0 else y2_\n y3 = np.concatenate((y3, y3_)) if len(y3) > 0 else y3_\n\n # Provide one sample of the input data to the model.\n model = create_model(X1[0], X2[0], X3[0], X4[0])\n\n# model.summary()\n# tf.keras.utils.plot_model(model, show_shapes=False, to_file='value_network.png')\n# tf.keras.utils.plot_model(model, show_shapes=True, to_file='value_network_with_shapes.png')\n# tf.keras.utils.plot_model(model, show_shapes=False, show_layer_names=False, to_file='value_network_blocks.png')\n\n from time import time\n now = int(time())\n\n cbs = [\n tf.keras.callbacks.TensorBoard(log_dir='./training_logs_{}'.format(now)),\n tf.keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True),\n ]\n\n model.fit([X1, X2, X3, X4], [y1, y2, y3], validation_split=0.2, epochs=200, batch_size=32, callbacks=cbs)\n model.save('value_network.h5')\n"
] |
[
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.keras.layers.Activation",
"numpy.array_equal",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.preprocessing.text.one_hot",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.compat.v1.Session",
"numpy.concatenate",
"tensorflow.keras.layers.LSTM",
"numpy.diff",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] |
IESSC/Machine-Learning
|
[
"0866f09ee7ee671b49f0c7340383f5051dcacb49"
] |
[
"Ch 4/test_RegularizedLinearModels.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 4 22:15:19 2017\n\n@author: ASUS\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import SGDRegressor\n\n# In[]\nm = 100\nX = 6 * np.random.rand(m, 1) - 3\ny = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)\n\nplt.plot(X, y, \"b.\")\n\nsgd_reg = SGDRegressor(penalty=\"l2\")\nsgd_reg.fit(X, y.ravel())\nsgd_reg.predict([[1.5]])\n\n# In[]\n\nfrom sklearn.linear_model import Ridge\nridge_reg = Ridge(alpha=1, solver=\"cholesky\")\nridge_reg.fit(X, y)\nridge_reg.predict([[1.5]])\n\n# In[]\n\nfrom sklearn.linear_model import Lasso\nlasso_reg = Lasso(alpha=0.1)\nlasso_reg.fit(X, y)\nlasso_reg.predict([[1.5]])\n\n# In[]\n\nfrom sklearn.linear_model import ElasticNet\nelastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)\nelastic_net.fit(X, y)\nelastic_net.predict([[1.5]])"
] |
[
[
"sklearn.linear_model.SGDRegressor",
"sklearn.linear_model.ElasticNet",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.plot",
"sklearn.linear_model.Ridge",
"numpy.random.randn",
"numpy.random.rand"
]
] |
Erotemic/netharn
|
[
"bc4a6d75445c949e709e5ab903ba72813ec68b79",
"bc4a6d75445c949e709e5ab903ba72813ec68b79"
] |
[
"netharn/data/channel_spec.py",
"netharn/initializers/functional.py"
] |
[
"import ubelt as ub\nimport six\n\n\nclass ChannelSpec(ub.NiceRepr):\n \"\"\"\n Parse and extract information about network input channel specs for\n early or late fusion networks.\n\n Notes:\n The pipe ('|') character represents an early-fused input stream, and\n order matters (it is non-communative).\n\n The comma (',') character separates different inputs streams/branches\n for a multi-stream/branch network which will be lated fused. Order does\n not matter\n\n TODO:\n - [ ] : normalize representations? e.g: rgb = r|g|b?\n - [ ] : rename to BandsSpec or SensorSpec?\n\n Example:\n >>> # Integer spec\n >>> ChannelSpec.coerce(3)\n <ChannelSpec(u0|u1|u2) ...>\n\n >>> # single mode spec\n >>> ChannelSpec.coerce('rgb')\n <ChannelSpec(rgb) ...>\n\n >>> # early fused input spec\n >>> ChannelSpec.coerce('rgb|disprity')\n <ChannelSpec(rgb|disprity) ...>\n\n >>> # late fused input spec\n >>> ChannelSpec.coerce('rgb,disprity')\n <ChannelSpec(rgb,disprity) ...>\n\n >>> # early and late fused input spec\n >>> ChannelSpec.coerce('rgb|ir,disprity')\n <ChannelSpec(rgb|ir,disprity) ...>\n\n Example:\n >>> from netharn.data.channel_spec import * # NOQA\n >>> self = ChannelSpec('gray')\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n >>> self = ChannelSpec('rgb')\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n >>> self = ChannelSpec('rgb|disparity')\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n >>> self = ChannelSpec('rgb|disparity,disparity')\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n >>> self = ChannelSpec('rgb,disparity,flowx|flowy')\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n\n Example:\n >>> from netharn.data.channel_spec import * # NOQA\n >>> specs = [\n >>> 'rgb', # and rgb input\n >>> 'rgb|disprity', # rgb early fused with disparity\n >>> 'rgb,disprity', # rgb early late with disparity\n >>> 'rgb|ir,disprity', # rgb early fused with ir and late fused with disparity\n >>> 3, # 3 unknown channels\n >>> ]\n >>> for spec in specs:\n >>> print('=======================')\n >>> print('spec = {!r}'.format(spec))\n >>> #\n >>> self = ChannelSpec.coerce(spec)\n >>> print('self = {!r}'.format(self))\n >>> sizes = self.sizes()\n >>> print('sizes = {!r}'.format(sizes))\n >>> print('self.info = {}'.format(ub.repr2(self.info, nl=1)))\n >>> #\n >>> item = self._demo_item((1, 1), rng=0)\n >>> inputs = self.encode(item)\n >>> components = self.decode(inputs)\n >>> input_shapes = ub.map_vals(lambda x: x.shape, inputs)\n >>> component_shapes = ub.map_vals(lambda x: x.shape, components)\n >>> print('item = {}'.format(ub.repr2(item, precision=1)))\n >>> print('inputs = {}'.format(ub.repr2(inputs, precision=1)))\n >>> print('input_shapes = {}'.format(ub.repr2(input_shapes)))\n >>> print('components = {}'.format(ub.repr2(components, precision=1)))\n >>> print('component_shapes = {}'.format(ub.repr2(component_shapes, nl=1)))\n\n \"\"\"\n\n _known = {\n 'rgb': 'r|g|b'\n }\n\n _size_lut = {\n 'rgb': 3,\n }\n\n def __init__(self, spec):\n # TODO: allow integer specs\n self.spec = spec\n self._info = {}\n\n def __nice__(self):\n return self.spec\n\n def __json__(self):\n return self.spec\n\n def __contains__(self, key):\n \"\"\"\n Example:\n >>> 'disparity' in ChannelSpec('rgb,disparity,flowx|flowy')\n True\n >>> 'gray' in ChannelSpec('rgb,disparity,flowx|flowy')\n False\n \"\"\"\n return key in self.unique()\n\n @property\n def info(self):\n self._info = {\n 'spec': self.spec,\n 'parsed': self.parse(),\n 'unique': 
self.unique(),\n 'normed': self.normalize(),\n }\n return self._info\n\n @classmethod\n def coerce(cls, data):\n if isinstance(data, cls):\n self = data\n return self\n else:\n if isinstance(data, int):\n # we know the number of channels, but not their names\n spec = '|'.join(['u{}'.format(i) for i in range(data)])\n elif isinstance(data, six.string_types):\n spec = data\n else:\n raise TypeError(type(data))\n\n self = cls(spec)\n return self\n\n def parse(self):\n \"\"\"\n Build internal representation\n \"\"\"\n # commas break inputs into multiple streams\n stream_specs = self.spec.split(',')\n parsed = {ss: ss.split('|') for ss in stream_specs}\n return parsed\n\n def normalize(self):\n spec = self.spec\n stream_specs = spec.split(',')\n parsed = {ss: ss for ss in stream_specs}\n for k1 in parsed.keys():\n for k, v in self._known.items():\n parsed[k1] = parsed[k1].replace(k, v)\n parsed = {k: v.split('|') for k, v in parsed.items()}\n return parsed\n\n def keys(self):\n spec = self.spec\n stream_specs = spec.split(',')\n for spec in stream_specs:\n yield spec\n\n def sizes(self):\n \"\"\"\n Number of dimensions for each fused stream channel\n\n IE: The EARLY-FUSED channel sizes\n\n Example:\n >>> self = ChannelSpec('rgb|disparity,flowx|flowy')\n >>> self.sizes()\n \"\"\"\n sizes = {\n key: sum(self._size_lut.get(part, 1) for part in vals)\n for key, vals in self.parse().items()\n }\n return sizes\n\n def unique(self):\n \"\"\"\n Returns the unique channels that will need to be given or loaded\n \"\"\"\n return set(ub.flatten(self.parse().values()))\n\n def _item_shapes(self, dims):\n \"\"\"\n Expected shape for an input item\n\n Args:\n dims (Tuple[int, int]): the spatial dimension\n\n Returns:\n Dict[int, tuple]\n \"\"\"\n item_shapes = {}\n parsed = self.parse()\n # normed = self.normalize()\n fused_keys = list(self.keys())\n for fused_key in fused_keys:\n components = parsed[fused_key]\n for mode_key in components:\n c = self._size_lut.get(mode_key, 1)\n shape = (c,) + tuple(dims)\n item_shapes[mode_key] = shape\n return item_shapes\n\n def _demo_item(self, dims=(4, 4), rng=None):\n \"\"\"\n Create an input that satisfies this spec\n\n Returns:\n dict: an item like it might appear when its returned from the\n `__getitem__` method of a :class:`torch...Dataset`.\n\n Example:\n >>> dims = (1, 1)\n >>> ChannelSpec.coerce(3)._demo_item(dims, rng=0)\n >>> ChannelSpec.coerce('r|g|b|disaprity')._demo_item(dims, rng=0)\n >>> ChannelSpec.coerce('rgb|disaprity')._demo_item(dims, rng=0)\n >>> ChannelSpec.coerce('rgb,disaprity')._demo_item(dims, rng=0)\n >>> ChannelSpec.coerce('rgb')._demo_item(dims, rng=0)\n >>> ChannelSpec.coerce('gray')._demo_item(dims, rng=0)\n \"\"\"\n import torch\n import kwarray\n rng = kwarray.ensure_rng(rng)\n item_shapes = self._item_shapes(dims)\n item = {\n key: torch.from_numpy(rng.rand(*shape))\n for key, shape in item_shapes.items()\n }\n return item\n\n def encode(self, item, axis=0):\n \"\"\"\n Given a dictionary containing preloaded components of the network\n inputs, build a concatenated network representations of each input\n stream.\n\n Args:\n item (dict): a batch item\n axis (int, default=0): concatenation dimension\n\n Returns:\n Dict[str, Tensor]: mapping between input stream and its early fused\n tensor input.\n\n Example:\n >>> import torch\n >>> dims = (4, 4)\n >>> item = {\n >>> 'rgb': torch.rand(3, *dims),\n >>> 'disparity': torch.rand(1, *dims),\n >>> 'flowx': torch.rand(1, *dims),\n >>> 'flowy': torch.rand(1, *dims),\n >>> }\n >>> # Complex Case\n 
>>> self = ChannelSpec('rgb,disparity,rgb|disparity|flowx|flowy,flowx|flowy')\n >>> inputs = self.encode(item)\n >>> input_shapes = ub.map_vals(lambda x: x.shape, inputs)\n >>> print('input_shapes = {}'.format(ub.repr2(input_shapes, nl=1)))\n >>> # Simpler case\n >>> self = ChannelSpec('rgb|disparity')\n >>> inputs = self.encode(item)\n >>> input_shapes = ub.map_vals(lambda x: x.shape, inputs)\n >>> print('input_shapes = {}'.format(ub.repr2(input_shapes, nl=1)))\n \"\"\"\n import torch\n inputs = dict()\n parsed = self.parse()\n unique = self.unique()\n components = {k: item[k] for k in unique}\n for key, parts in parsed.items():\n inputs[key] = torch.cat([components[k] for k in parts], dim=axis)\n return inputs\n\n def decode(self, inputs, axis=1):\n \"\"\"\n break an early fused item into its components\n\n Example:\n >>> import torch\n >>> dims = (4, 4)\n >>> components = {\n >>> 'rgb': torch.rand(3, *dims),\n >>> 'ir': torch.rand(1, *dims),\n >>> }\n >>> self = ChannelSpec('rgb|ir')\n >>> inputs = self.encode(components)\n >>> from netharn.data import data_containers\n >>> item = {k: data_containers.ItemContainer(v, stack=True)\n >>> for k, v in inputs.items()}\n >>> batch = data_containers.container_collate([item, item])\n >>> components = self.decode(batch)\n \"\"\"\n parsed = self.parse()\n components = dict()\n for key, parts in parsed.items():\n idx1 = 0\n for part in parts:\n size = self._size_lut.get(part, 1)\n idx2 = idx1 + size\n fused = inputs[key]\n index = ([slice(None)] * axis + [slice(idx1, idx2)])\n component = fused[index]\n components[part] = component\n idx1 = idx2\n return components\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python ~/code/netharn/netharn/data/channel_spec.py all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n",
"import numpy as np\nimport torch\nimport ubelt as ub\n\n\ndef trainable_layers(model, names=False):\n \"\"\"\n Returns all layers containing trainable parameters\n\n Notes:\n It may be better to simply use model.named_parameters() instead in most\n situation. This is useful when you need the classes that contains the\n parameters instead of the parameters themselves.\n\n Example:\n >>> import torchvision\n >>> model = torchvision.models.AlexNet()\n >>> list(trainable_layers(model, names=True))\n \"\"\"\n if names:\n stack = [('', '', model)]\n while stack:\n prefix, basename, item = stack.pop()\n name = '.'.join([p for p in [prefix, basename] if p])\n if isinstance(item, torch.nn.modules.conv._ConvNd):\n yield name, item\n elif isinstance(item, torch.nn.modules.batchnorm._BatchNorm):\n yield name, item\n elif hasattr(item, 'reset_parameters'):\n yield name, item\n\n child_prefix = name\n for child_basename, child_item in list(item.named_children())[::-1]:\n stack.append((child_prefix, child_basename, child_item))\n else:\n queue = [model]\n while queue:\n item = queue.pop(0)\n # TODO: need to put all trainable layer types here\n # (I think this is just everything with reset_parameters)\n if isinstance(item, torch.nn.modules.conv._ConvNd):\n yield item\n elif isinstance(item, torch.nn.modules.batchnorm._BatchNorm):\n yield item\n elif hasattr(item, 'reset_parameters'):\n yield item\n # if isinstance(input, torch.nn.modules.Linear):\n # yield item\n # if isinstance(input, torch.nn.modules.Bilinear):\n # yield item\n # if isinstance(input, torch.nn.modules.Embedding):\n # yield item\n # if isinstance(input, torch.nn.modules.EmbeddingBag):\n # yield item\n for child in item.children():\n queue.append(child)\n\n\ndef apply_initializer(input, func, funckw):\n \"\"\"\n Recursively initializes the input using a torch.nn.init function.\n\n If the input is a model, then only known layer types are initialized.\n\n Args:\n input (Tensor | Module): can be a model, layer, or tensor\n func (callable): initialization function\n funckw (dict):\n\n Example:\n >>> from torch import nn\n >>> import torch\n >>> class DummyNet(nn.Module):\n >>> def __init__(self, n_channels=1, n_classes=10):\n >>> super(DummyNet, self).__init__()\n >>> self.conv = nn.Conv2d(n_channels, 10, kernel_size=5)\n >>> self.norm = nn.BatchNorm2d(10)\n >>> self.param = torch.nn.Parameter(torch.rand(3))\n >>> self = DummyNet()\n >>> func = nn.init.kaiming_normal_\n >>> apply_initializer(self, func, {})\n >>> func = nn.init.constant_\n >>> apply_initializer(self, func, {'val': 42})\n >>> assert np.all(self.conv.weight.detach().numpy() == 42)\n >>> assert np.all(self.conv.bias.detach().numpy() == 0), 'bias is always init to zero'\n >>> assert np.all(self.norm.bias.detach().numpy() == 0), 'bias is always init to zero'\n >>> assert np.all(self.norm.weight.detach().numpy() == 1)\n >>> assert np.all(self.norm.running_mean.detach().numpy() == 0.0)\n >>> assert np.all(self.norm.running_var.detach().numpy() == 1.0)\n \"\"\"\n if getattr(input, 'bias', None) is not None:\n # print('zero input bias')\n # zero all biases\n input.bias.data.zero_()\n\n if isinstance(input, (torch.Tensor)):\n # assert False, ('input is tensor? 
does this make sense?')\n # print('input is tensor')\n func(input, **funckw)\n # data = input\n elif isinstance(input, (torch.nn.modules.conv._ConvNd)):\n # print('input is convnd')\n func(input.weight, **funckw)\n # elif isinstance(input, (torch.nn.modules.linear.Linear)):\n # func(input.weight, **funckw)\n elif isinstance(input, torch.nn.modules.batchnorm._BatchNorm):\n # Use default batch norm\n input.reset_parameters()\n # elif isinstance(input, torch.nn.modules.Linear):\n # input.reset_parameters()\n elif hasattr(input, 'reset_parameters'):\n # print('unknown input type fallback on reset_params')\n input.reset_parameters()\n else:\n # input is a torch module\n model = input\n # print('recurse input')\n layers = list(trainable_layers(model))\n # print('layers = {!r}'.format(layers))\n for item in layers:\n apply_initializer(item, func, funckw)\n\n\ndef load_partial_state(model, model_state_dict, leftover=None,\n ignore_unset=False, verbose=2,\n mangle=True, association=None,\n initializer=None):\n \"\"\"\n CommandLine:\n python -m netharn.initializers.nninit_base load_partial_state\n\n Args:\n model (torch.nn.Module): module to initialize\n\n model_state_dict (dict): state dict we wish to transfer\n\n leftover (callable): fallback method for initializing incompatible\n areas, if none then those areas are left as-is.\n\n association (str): controls how we search for the association between\n the two model states. Can be strict, module-hack, prefix-hack, or\n embedding. Default is: prefix-hack.\n\n mangle (bool, default=True): If True, mangles tensors that have the\n same key, but different shapes forcing them to fit. This might\n destroy information when forcing a a larger tensor into a smaller\n tensor, or leave extra uninitialized room when a small tensor is\n placed in a larger one. Note be careful when mangling a\n classification layer if class indexes are not aligned.\n\n verbose (int): verbosity level\n\n Returns:\n Dict: info - summary of actions taken\n\n TODO:\n - [ ] Allow user to specify how incompatible layers are handled.\n\n Notes:\n\n Have you ever had the scenario where\n\n Has anyone ever had a problem where you had a torch model with a state\n dict with keys that looked like: `mymodel.detector.layer1.conv.weight`,\n but you had a pretrained weight file with keys that looked like:\n `module.layer1.conv.weight`?\n\n The latest version of\n `netharn.initializers.functional.load_patial_state` can handle this by\n solving a maximum-common-subtree-isomorphism problem. 
This computes the\n largest possible mapping between the two state dictionaries that share\n consistent suffixes.\n\n >>> # This means you can load an off-the-shelf unmodified pretrained resnet50\n >>> # where the keys might look something like this:\n >>> resnet_keys = {\n >>> 'conv1.weight',\n >>> 'layer1.0.conv1.weight',\n >>> 'layer1.0.conv2.weight',\n >>> 'layer1.0.conv3.weight',\n >>> 'layer1.0.downsample.0.weight',\n >>> 'layer2.0.conv1.weight',\n >>> 'layer2.0.conv2.weight',\n >>> 'layer2.0.conv3.weight',\n >>> 'layer3.0.conv1.weight',\n >>> 'layer4.0.conv1.weight',\n >>> 'fc.weight',\n >>> 'fc.bias',\n >>> }\n >>> #\n >>> # And perhaps you have a model that has a state dict where keys\n >>> # look like this:\n >>> model_keys = {\n >>> 'preproc.conv1.weight'\n >>> 'backbone.layer1.0.conv1.weight',\n >>> 'backbone.layer1.0.conv2.weight',\n >>> 'backbone.layer1.0.conv3.weight',\n >>> 'backbone.layer1.0.downsample.0.weight',\n >>> 'backbone.layer2.0.conv1.weight',\n >>> 'backbone.layer2.0.conv2.weight',\n >>> 'backbone.layer2.0.conv3.weight',\n >>> 'backbone.layer3.0.conv1.weight',\n >>> 'backbone.layer4.0.conv1.weight',\n >>> 'head.conv1'\n >>> 'head.conv2'\n >>> 'head.fc.weight'\n >>> 'head.fc.bias'\n >>> }\n >>> #\n >>> # We can compute a partial mapping between them\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(resnet_keys, model_keys)\n >>> print(ub.repr2(ub.dzip(subpaths1, subpaths2)))\n {\n 'layer1.0.conv2.weight': 'backbone.layer1.0.conv2.weight',\n 'layer1.0.conv3.weight': 'backbone.layer1.0.conv3.weight',\n 'layer1.0.downsample.0.weight': 'backbone.layer1.0.downsample.0.weight',\n 'layer2.0.conv1.weight': 'backbone.layer2.0.conv1.weight',\n 'layer2.0.conv2.weight': 'backbone.layer2.0.conv2.weight',\n 'layer2.0.conv3.weight': 'backbone.layer2.0.conv3.weight',\n 'layer3.0.conv1.weight': 'backbone.layer3.0.conv1.weight',\n 'layer4.0.conv1.weight': 'backbone.layer4.0.conv1.weight',\n }\n\n Also, if the sizes of the tensor don't quite fit, they will be\n mangled, i.e. \"shoved-in\" as best as possible.\n\n\n Example:\n >>> import netharn as nh\n >>> self1 = nh.models.ToyNet2d(input_channels=1, num_classes=10)\n >>> self2 = nh.models.ToyNet2d(input_channels=3, num_classes=2)\n >>> self1.hack_param1 = torch.nn.Parameter(torch.rand(1))\n >>> self2.hack_param1 = torch.nn.Parameter(torch.rand(3))\n >>> self2.hack_param2 = torch.nn.Parameter(torch.rand(3))\n >>> model_state_dict = self1.state_dict()\n >>> load_partial_state(self2, model_state_dict)\n >>> load_partial_state(self2, model_state_dict, leftover=torch.nn.init.kaiming_normal_)\n\n Example:\n >>> import netharn as nh\n >>> xpu = nh.XPU(None)\n >>> self1 = nh.models.ToyNet2d()\n >>> self2 = xpu.mount(self1)\n >>> load_partial_state(self2, self1.state_dict())\n >>> load_partial_state(self1, self2.state_dict())\n >>> # Add extra nonsense to state-dict\n >>> extra_state_dict = {'extra.' 
+ k: v for k, v in self1.state_dict().items()}\n >>> extra_state_dict['stats'] = ub.peek(extra_state_dict.values()).clone()\n >>> model = self2\n >>> model_state_dict = extra_state_dict\n >>> load_partial_state(self2, extra_state_dict)\n\n Example:\n >>> # xdoctest: +REQUIRES(--slow)\n >>> from netharn.initializers.functional import * # NOQA\n >>> import torchvision\n >>> import torch\n >>> resnet50 = torchvision.models.resnet50()\n >>> class CustomModel(torch.nn.Module):\n >>> def __init__(self):\n >>> super().__init__()\n >>> self.module = resnet50\n >>> self.extra = torch.nn.Linear(1, 1)\n >>> model = CustomModel()\n >>> model_state_dict = resnet50.state_dict()\n >>> model_state_dict2 = {'prefix.' + k: v for k, v in model_state_dict.items()}\n >>> import ubelt as ub\n >>> with ub.Timer(verbose=2, label='strict'):\n >>> load_partial_state(model, model_state_dict, association='strict', verbose=0)\n >>> with ub.Timer(verbose=2, label='prefix-hack'):\n >>> load_partial_state(model, model_state_dict, association='prefix-hack', verbose=0)\n >>> with ub.Timer(verbose=2, label='module-hack'):\n >>> load_partial_state(model, model_state_dict, association='module-hack', verbose=0)\n >>> with ub.Timer(verbose=2, label='embedding'):\n >>> load_partial_state(model, model_state_dict, association='embedding', verbose=0)\n\n >>> load_partial_state(model, model_state_dict, association='prefix-hack', verbose=1)\n >>> load_partial_state(model, model_state_dict, association='module-hack', verbose=1)\n\n CommandLine:\n xdoctest -m /home/joncrall/code/netharn/netharn/initializers/functional.py load_partial_state:2 --slow\n\n \"\"\"\n if association is None:\n association = 'module-hack' # old default\n # association = 'prefix-hack' # new default\n\n if initializer is not None:\n import warnings\n warnings.warn('initializer is deprecated use leftover')\n leftover = initializer\n\n self_state = model.state_dict()\n\n def _fix_keys(model_state_dict):\n \"\"\"\n Hack around DataParallel wrapper. If there is nothing in common between\n the two models check to see if prepending 'module.' 
to other keys fixes\n it.\n \"\"\"\n other_keys = set(model_state_dict)\n self_keys = set(self_state)\n common_keys = other_keys.intersection(self_keys)\n if not common_keys:\n if association == 'strict':\n pass\n elif association == 'module-hack':\n # If there are no common keys try a hack\n prefix = 'module.'\n def smap(f, ss):\n return set(map(f, ss))\n def fix1(k):\n return prefix + k\n def fix2(k):\n if k.startswith(prefix):\n return k[len(prefix):]\n if smap(fix1, other_keys).intersection(self_keys):\n model_state_dict = ub.map_keys(fix1, model_state_dict)\n elif smap(fix2, other_keys).intersection(self_keys):\n model_state_dict = ub.map_keys(fix2, model_state_dict)\n elif association == 'prefix-hack':\n import functools\n def add_prefix(k, prefix):\n return prefix + k\n def remove_prefix(k, prefix):\n if k.startswith(prefix):\n return k[len(prefix):]\n # set1 = other_keys\n # target_set2 = self_keys\n found = _best_prefix_transform(other_keys, self_keys)\n if found is not None:\n for action, prefix in found['transform']:\n if action == 'add':\n func = functools.partial(add_prefix, prefix=prefix)\n elif action == 'remove':\n func = functools.partial(remove_prefix, prefix=prefix)\n else:\n raise AssertionError\n model_state_dict = ub.map_keys(func, model_state_dict)\n elif association == 'embedding':\n if verbose > 1:\n print('Using subpath embedding assocation, may take some time')\n # I believe this is the correct way to solve the problem\n paths1 = sorted(other_keys)\n paths2 = sorted(self_state)\n subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2)\n mapping = ub.dzip(subpaths1, subpaths2)\n if verbose > 1:\n print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n model_state_dict = ub.map_keys(lambda k: mapping.get(k, k), model_state_dict)\n else:\n raise KeyError(association)\n return model_state_dict\n\n other_state = _fix_keys(model_state_dict)\n\n self_unset_keys = set(self_state.keys()) # will end up as keys in our that were not set\n other_unused_keys = set(other_state.keys()) # will end up as keys in the other model that were not used\n\n seen_keys = ub.ddict(set)\n\n for key, other_value in other_state.items():\n if key not in self_state:\n if verbose > 0:\n print('Skipping {} because it does not exist'.format(key))\n seen_keys['skipped'].add(key)\n else:\n self_value = self_state[key]\n if other_value.size() == self_value.size():\n self_state[key] = other_value\n self_unset_keys.remove(key)\n other_unused_keys.remove(key)\n seen_keys['full_add'].add(key)\n elif len(other_value.size()) == len(self_value.size()):\n if not mangle:\n if verbose > 0:\n print('Skipping {} due to incompatable size and mangle=False'.format(key))\n print(' * self = {!r}'.format(self_value.size()))\n print(' * other = {!r}'.format(other_value.size()))\n seen_keys['skipped'].add(key)\n elif key.endswith('bias'):\n if verbose > 0:\n print('Skipping {} due to incompatable size'.format(key))\n print(' * self = {!r}'.format(self_value.size()))\n print(' * other = {!r}'.format(other_value.size()))\n seen_keys['skipped'].add(key)\n else:\n if leftover is None:\n if verbose > 0:\n print('Skipping {} due to incompatable size and no default initializer'.format(key))\n print(' * self = {!r}'.format(self_value.size()))\n print(' * other = {!r}'.format(other_value.size()))\n seen_keys['skipped'].add(key)\n else:\n if verbose > 0:\n print('Partially add {} with incompatable size'.format(key))\n print(' * self = {!r}'.format(self_value.size()))\n print(' * other = 
{!r}'.format(other_value.size()))\n # Initialize all weights in case any are unspecified\n if leftover is None:\n try:\n leftover(self_state[key])\n except Exception:\n if verbose > 0:\n print('Unable to init {} with {}'.format(key, leftover))\n\n # Transfer as much as possible\n min_size = np.minimum(self_state[key].shape,\n other_value.shape)\n sl = tuple([slice(0, s) for s in min_size])\n self_state[key][sl] = other_value[sl]\n\n # if shock_partial:\n # # Shock weights because we are doing something weird\n # # might help the network recover in case this is\n # # not a good idea\n # shock(self_state[key], func=leftover)\n self_unset_keys.remove(key)\n other_unused_keys.remove(key)\n\n if self_state[key].numel() < other_value.numel():\n seen_keys['partial_add_some'].add(key)\n else:\n seen_keys['partial_add_all'].add(key)\n else:\n if verbose > 0:\n print('Skipping {} due to incompatable size'.format(key))\n print(' * self = {!r}'.format(self_value.size()))\n print(' * other = {!r}'.format(other_value.size()))\n seen_keys['skipped'].add(key)\n\n if ignore_unset is True:\n self_unset_keys = []\n elif ignore_unset:\n self_unset_keys = list(ub.oset(self_unset_keys) - set(ignore_unset))\n\n if (self_unset_keys or other_unused_keys or\n seen_keys['partial_add_some'] or seen_keys['partial_add_all']):\n if verbose > 0:\n if seen_keys:\n print('Pretrained weights are a partial fit')\n else:\n print('Pretrained weights do not fit!')\n if verbose > 1:\n print('Seen Keys: {}'.format(ub.repr2(seen_keys, nl=2)))\n print('Self Unset Keys: {}'.format(ub.repr2(self_unset_keys, nl=1)))\n print('Other Unused keys: {}'.format(ub.repr2(other_unused_keys, nl=1)))\n print('summary:')\n seen_sum = ub.map_vals(len, seen_keys)\n print('Seen Num: {}'.format(ub.repr2(seen_sum, nl=2)))\n print('Self Unset Num: {}'.format(ub.repr2(len(self_unset_keys), nl=1)))\n print('Other Unused Num: {}'.format(ub.repr2(len(other_unused_keys), nl=1)))\n if leftover:\n if verbose > 0:\n print('Initializing unused keys using {}'.format(leftover))\n for key in self_unset_keys:\n if key.endswith('.num_batches_tracked'):\n pass # ignore num_batches_tracked\n elif key.endswith('.bias'):\n self_state[key].fill_(0)\n else:\n try:\n leftover(self_state[key])\n except Exception:\n if verbose > 0:\n print('Unable to init {} with {}'.format(key, leftover))\n\n else:\n if verbose > 0:\n print('Pretrained weights are a perfect fit')\n model.load_state_dict(self_state)\n\n info = {\n 'seen': seen_keys,\n 'self_unset': self_unset_keys,\n 'other_unused': other_unused_keys\n }\n return info\n\n\ndef _best_prefix_transform(set1, target_set2):\n \"\"\"\n Find a way to transform prefixes of items in set1 to match target_set2\n\n Example:\n >>> set1 = {'mod.f.0.w',\n >>> 'mod.f.1.b',\n >>> 'mod.f.1.n',\n >>> 'mod.f.1.rm',\n >>> 'mod.f.1.rv',}\n >>> #\n >>> target_set2 = {\n >>> 'bar.foo.extra.f.1.b',\n >>> 'bar.foo.extra.f.1.n',\n >>> 'bar.foo.extra.f.1.w',\n >>> 'bar.foo.extra.f.3.w',\n >>> }\n >>> _best_prefix_transform(set1, target_set2)\n >>> target_set2.add('JUNK')\n >>> _best_prefix_transform(set1, target_set2)\n \"\"\"\n\n # probably an efficient way to do this with a trie\n\n # NOTE: In general this is a graph-isomorphism problem or a maximum common\n # subgraph problem. However, we can look only at the special case of\n # \"maximum common subtrees\". 
Given two directory structures (as trees)\n # we find the common bits.\n # https://perso.ensta-paris.fr/~diam/ro/online/viggo_wwwcompendium/node168.html\n # We can approximate to O(log log n / log^2 n)\n # Can get algorithm from maximum independent set\n # https://arxiv.org/abs/1602.07210\n\n # The most efficient algorithm here would be for solving\n # \"Maximum common labeled subtrees\"\n # APX-hard for unordered trees, but polytime solveable for ordered trees\n # For directory structures we can induce an order, and hense obtain a\n # polytime solution\n # #\n # On the Maximum Common Embedded Subtree Problem for Ordered Trees\n # https://pdfs.semanticscholar.org/0b6e/061af02353f7d9b887f9a378be70be64d165.pdf\n\n from os.path import commonprefix\n prefixes1 = commonprefix(list(set1)).split('.')\n prefixes2 = commonprefix(list(target_set2)).split('.')\n\n # Remove the trailing prefixes that are the same\n num_same = 0\n for i in range(1, min(len(prefixes1), len(prefixes2))):\n if prefixes1[-i] == prefixes2[-i]:\n num_same = i\n else:\n break\n prefixes1 = prefixes1[:-num_same]\n prefixes2 = prefixes2[:-num_same]\n\n ALLOW_FUZZY = 1\n if ALLOW_FUZZY and len(prefixes2) == 0:\n # SUPER HACK FOR CASE WHERE THERE IS JUST ONE SPOILER ELEMENT IN THE\n # TARGET SET. THE ALGORITHM NEEDS TO BE RETHOUGHT FOR THAT CASE\n possible_prefixes = [k.split('.') for k in target_set2]\n prefix_hist = ub.ddict(lambda: 0)\n for item in possible_prefixes:\n for i in range(1, len(item)):\n prefix_hist[tuple(item[0:i])] += 1\n prefixes2 = ['.'.join(ub.argmax(prefix_hist))]\n\n def add_prefix(items, prefix):\n return {prefix + k for k in items}\n def remove_prefix(items, prefix):\n return {k[len(prefix):] if k.startswith(prefix) else k for k in items}\n\n import itertools as it\n found_cand = []\n for i1, i2 in it.product(range(len(prefixes1) + 1), range(len(prefixes2) + 1)):\n if i1 == 0 and i2 == 0:\n continue\n # Very inefficient, we should be able to do better\n prefix1 = '.'.join(prefixes1[:i1])\n prefix2 = '.'.join(prefixes2[:i2])\n if prefix1:\n prefix1 = prefix1 + '.'\n if prefix2:\n prefix2 = prefix2 + '.'\n\n # We are allowed to remove a prefix from a set, add the other\n # prefix to the set, or remove and then add.\n set1_cand1 = remove_prefix(set1, prefix1)\n set1_cand2 = add_prefix(set1, prefix2)\n set1_cand3 = add_prefix(set1_cand1, prefix2)\n\n common1 = set1_cand1 & target_set2\n common2 = set1_cand2 & target_set2\n common3 = set1_cand3 & target_set2\n if common1:\n found_cand.append({\n 'transform': [('remove', prefix1)],\n 'value': len(common1),\n })\n if common2:\n found_cand.append({\n 'transform': [('add', prefix2)],\n 'value': len(common2),\n })\n if common3:\n found_cand.append({\n 'transform': [('remove', prefix1), ('add', prefix2)],\n 'value': len(common3),\n })\n if len(found_cand):\n found = max(found_cand, key=lambda x: x['value'])\n else:\n found = None\n return found\n\n\ndef maximum_common_ordered_subpaths(paths1, paths2, sep='.'):\n \"\"\"\n CommandLine:\n xdoctest -m /home/joncrall/code/netharn/netharn/initializers/functional.py maximum_common_ordered_subpaths:0 --profile && cat profile_output.txt\n xdoctest -m /home/joncrall/code/netharn/netharn/initializers/functional.py maximum_common_ordered_subpaths:0\n\n Example:\n >>> import torchvision\n >>> resnet50 = torchvision.models.resnet50()\n >>> paths1 = sorted(resnet50.state_dict().keys())[0:100]\n >>> paths2 = ['prefix.' 
+ k for k in paths1]\n >>> paths2.append('extra_key')\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n\n Example:\n >>> rng = None\n >>> import kwarray\n >>> rng = kwarray.ensure_rng(rng)\n >>> def random_paths(rng, max_depth=10):\n >>> depth = rng.randint(1, max_depth)\n >>> parts = list(map(chr, rng.randint(ord('a'), ord('z'), size=depth)))\n >>> path = '.'.join(parts)\n >>> return path\n >>> n = 50\n >>> paths1 = sorted({random_paths(rng) for _ in range(n)})\n >>> paths2 = sorted({random_paths(rng) for _ in range(n)})\n >>> paths1 = paths1 + ['a.' + k for k in paths2[0:n // 3]]\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n\n Example:\n >>> from netharn.initializers.functional import * # NOQA\n >>> paths1 = [\n >>> 'stats',\n >>> 'z.mod.f.0.w',\n >>> 'a.z.mod.f.0.b',\n >>> 'z.mod.f.1.b',\n >>> 'z.mod.f.1.n',\n >>> 'z.mod.f.1.m',\n >>> 'z.mod.f.1.v',\n >>> 'z.mod.f.2.m',\n >>> 'z.mod.z.q'\n >>> ]\n >>> # paths1 = ['mod']\n >>> #\n >>> paths2 = [\n >>> 'stats',\n >>> 'bar.f.0.w',\n >>> 'bar.foo.extra.z.q',\n >>> 'bar.foo.extra',\n >>> 'bar.foo.extra.f.1.b',\n >>> 'bar.foo.extra.f.1.n',\n >>> 'bar.foo.extra.f.1.w',\n >>> 'bar.foo.extra.f.3.z', # FIXME we need to handle label comparision operators\n >>> # I think we allow labels to match if they have the same suffix\n >>> ]\n >>> sep = '.'\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2, sep)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n\n\n Example:\n >>> sep = '.'\n >>> paths1 = ['a.b']\n >>> paths2 = ['a.b']\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2, sep)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n >>> paths1 = ['c.a.b']\n >>> paths2 = ['a.b']\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2, sep)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n >>> paths1 = ['c.a.b', 'c.a.e', 'c.a.q']\n >>> paths2 = ['a.b', 'c.e', 'c.a', 'a.q']\n >>> subpaths1, subpaths2 = maximum_common_ordered_subpaths(paths1, paths2, sep)\n >>> mapping = ub.dzip(subpaths1, subpaths2)\n >>> print('mapping = {}'.format(ub.repr2(mapping, nl=1)))\n \"\"\"\n import networkx as nx\n\n # the longest common balanced sequence problem\n def _affinity(tok1, tok2):\n score = 0\n for t1, t2 in zip(tok1[::-1], tok2[::-1]):\n if t1 == t2:\n score += 1\n else:\n break\n return score\n # return tok1[-1] == tok2[-1]\n node_affinity = _affinity\n # import operator\n # eq = operator.eq\n\n def paths_to_tree(paths):\n tree = nx.OrderedDiGraph()\n for path in sorted(paths):\n parts = tuple(path.split(sep))\n node_path = []\n for i in range(1, len(parts) + 1):\n node = parts[0:i]\n tree.add_node(node)\n tree.nodes[node]['label'] = node[-1]\n node_path.append(node)\n for u, v in ub.iter_window(node_path, 2):\n tree.add_edge(u, v)\n return tree\n\n tree1 = paths_to_tree(paths1)\n tree2 = paths_to_tree(paths2)\n\n # _print_forest(tree1)\n # _print_forest(tree2)\n\n # if 0:\n # DiGM = isomorphism.DiGraphMatcher(tree1, tree2)\n # DiGM.is_isomorphic()\n # list(DiGM.subgraph_isomorphisms_iter())\n\n from netharn.initializers import _nx_extensions\n subtree1, 
subtree2 = _nx_extensions.maximum_common_ordered_tree_embedding(tree1, tree2, node_affinity=node_affinity)\n # subtree1, subtree2 = _nx_extensions.maximum_common_ordered_subtree_isomorphism(tree1, tree2, node_affinity=node_affinity)\n\n subpaths1 = [sep.join(node) for node in subtree1.nodes if subtree1.out_degree[node] == 0]\n subpaths2 = [sep.join(node) for node in subtree2.nodes if subtree2.out_degree[node] == 0]\n return subpaths1, subpaths2\n"
] |
[
[
"torch.cat"
],
[
"numpy.minimum"
]
] |
pambot/HistModPaper
|
[
"c3fe01046523dd3bbb4fb66a345c4e94a43ee121"
] |
[
"plot_regr_compare.py"
] |
[
"# load modules\nimport sys\nimport glob\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport cPickle as pickle\n\n\nplt.style.use('ggplot')\n\ncells = ['Gm12878', 'H1hesc', 'Helas3', 'Hepg2', 'Huvec', 'K562', 'Nhek']\n\nfolder = {\n1:'../predict1/results/',\n2:'../predict2/results/',\n3:'../predict3/results/',\n}\n\nplt.figure()\ncdict = {'full': '#003366', 'res':'#900000'}\nfor ttype in ['full', 'res']:\n v_means = []\n for fol in range(1,4):\n vals = []\n for cell in cells:\n data = pickle.load(open(folder[fol]+'rgrExpr_'+ttype+'_'+cell+'_1.0.pkl', 'rb'))\n vals.append(data['r_value'])\n \n v_means.append(np.array(vals))\n \n if ttype=='full':\n pos = np.array([0.5, 2.0, 3.5])\n else:\n pos = np.array([1.0, 2.5, 4.0])\n violin = plt.violinplot(v_means, pos, widths=0.3, showmeans=True, showextrema=False)\n plt.setp(violin['bodies'], facecolor=cdict[ttype], edgecolor=cdict[ttype])\n for key in ['cmeans']:\n\t plt.setp(violin[key], color=cdict[ttype], linewidth='3', alpha=0.5)\n\nblue_patch = mpatches.Patch(color='#003366', label='Full data') \nred_patch = mpatches.Patch(color='#900000', label='Residuals')\nplt.legend(handles=[blue_patch, red_patch])\nplt.xticks([0.75, 2.25, 3.75], ['Binary', 'ChromStates', 'Signals'], fontsize=16, color='k')\nplt.yticks(np.arange(3, 11)/10.0, fontsize=16, color='k')\nplt.ylabel('Pearson R', fontsize=18, color='k')\nplt.title('Expression', fontsize=18)\nplt.savefig('figures/r2Expr.png', bbox_inches='tight')\n\n\nplt.figure()\ncdict = {'full': '#003366', 'res':'#900000'}\nfor ttype in ['full', 'res']:\n v_means = []\n for fol in range(1,4):\n vals = []\n for cell in cells:\n data = pickle.load(open(folder[fol]+'rgrSpec_'+ttype+'_'+cell+'_1.0.pkl', 'rb'))\n vals.append(data['r_value'])\n \n v_means.append(np.array(vals))\n \n if ttype=='full':\n pos = np.array([0.5, 2.0, 3.5])\n else:\n pos = np.array([1.0, 2.5, 4.0])\n violin = plt.violinplot(v_means, pos, widths=0.3, showmeans=True, showextrema=False)\n plt.setp(violin['bodies'], facecolor=cdict[ttype], edgecolor=cdict[ttype])\n for key in ['cmeans']:\n\t plt.setp(violin[key], color=cdict[ttype], linewidth='3', alpha=0.5)\n\nblue_patch = mpatches.Patch(color='#003366', label='Full data') \nred_patch = mpatches.Patch(color='#900000', label='Residuals')\nplt.legend(handles=[blue_patch, red_patch])\nplt.xticks([0.75, 2.25, 3.75], ['Binary', 'ChromStates', 'Signals'], fontsize=16, color='k')\nplt.yticks(np.arange(3, 11)/10.0, fontsize=16, color='k')\nplt.ylabel('Pearson R', fontsize=18, color='k')\nplt.title('Specificity', fontsize=18)\nplt.savefig('figures/r2Spec.png', bbox_inches='tight')\n\n\nfrom scipy.stats import ttest_ind\nttest_ind(v_means[1], v_means[2])\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.violinplot",
"matplotlib.pyplot.xticks",
"numpy.array",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
gian1312/suchen
|
[
"df863140fd8df1ac2e195cbdfa4756f09f962270"
] |
[
"tensorforce/models/q_naf_model.py"
] |
[
"# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange\n\nimport tensorflow as tf\n\nfrom tensorforce import util, TensorForceError\nfrom tensorforce.models import QModel\nfrom tensorforce.core.networks import Linear\n\n\nclass QNAFModel(QModel):\n\n def __init__(\n self,\n states,\n actions,\n scope,\n device,\n saver,\n summarizer,\n execution,\n batching_capacity,\n variable_noise,\n states_preprocessing,\n actions_exploration,\n reward_preprocessing,\n update_mode,\n memory,\n optimizer,\n discount,\n network,\n distributions,\n entropy_regularization,\n target_sync_frequency,\n target_update_weight,\n double_q_model,\n huber_loss\n ):\n if any(action['type'] != 'float' or 'min_value' in action or 'max_value' in action for action in actions.values()):\n raise TensorForceError(\"Only unconstrained float actions valid for NAFModel.\")\n\n super(QNAFModel, self).__init__(\n states=states,\n actions=actions,\n scope=scope,\n device=device,\n saver=saver,\n summarizer=summarizer,\n execution=execution,\n batching_capacity=batching_capacity,\n variable_noise=variable_noise,\n states_preprocessing=states_preprocessing,\n actions_exploration=actions_exploration,\n reward_preprocessing=reward_preprocessing,\n update_mode=update_mode,\n memory=memory,\n optimizer=optimizer,\n discount=discount,\n network=network,\n distributions=distributions,\n entropy_regularization=entropy_regularization,\n target_sync_frequency=target_sync_frequency,\n target_update_weight=target_update_weight,\n double_q_model=double_q_model,\n huber_loss=huber_loss\n )\n\n def initialize(self, custom_getter):\n super(QNAFModel, self).initialize(custom_getter)\n\n self.state_values = dict()\n self.l_entries = dict()\n for name, action in self.actions_spec.items():\n num_action = util.prod(action['shape'])\n self.state_values[name] = Linear(size=num_action, scope='state-value')\n self.l_entries[name] = Linear(size=(num_action * (num_action - 1) // 2), scope='l-entries')\n\n def tf_q_value(self, embedding, distr_params, action, name):\n num_action = util.prod(self.actions_spec[name]['shape'])\n\n mean, stddev, _ = distr_params\n flat_mean = tf.reshape(tensor=mean, shape=(-1, num_action))\n flat_stddev = tf.reshape(tensor=stddev, shape=(-1, num_action))\n\n # Advantage computation\n # Network outputs entries of lower triangular matrix L\n if self.l_entries[name] is None:\n l_matrix = flat_stddev\n l_matrix = tf.exp(l_matrix)\n else:\n l_matrix = tf.map_fn(fn=tf.diag, elems=flat_stddev)\n\n l_entries = self.l_entries[name].apply(x=embedding)\n l_entries = tf.exp(l_entries)\n offset = 0\n columns = list()\n for zeros, size in enumerate(xrange(num_action - 1, -1, -1), 1):\n column = tf.pad(tensor=l_entries[:, offset: offset + size], paddings=((0, 0), (zeros, 
0)))\n columns.append(column)\n offset += size\n\n l_matrix += tf.stack(values=columns, axis=1)\n\n # P = LL^T\n p_matrix = tf.matmul(a=l_matrix, b=tf.transpose(a=l_matrix, perm=(0, 2, 1)))\n # A = -0.5 (a - mean)P(a - mean)\n flat_action = tf.reshape(tensor=action, shape=(-1, num_action))\n difference = flat_action - flat_mean\n advantage = tf.matmul(a=p_matrix, b=tf.expand_dims(input=difference, axis=2))\n advantage = tf.matmul(a=tf.expand_dims(input=difference, axis=1), b=advantage)\n advantage = tf.squeeze(input=(-advantage / 2.0), axis=2)\n\n # Q = A + V\n # State-value function\n state_value = self.state_values[name].apply(x=embedding)\n q_value = state_value + advantage\n\n return tf.reshape(tensor=q_value, shape=((-1,) + self.actions_spec[name]['shape']))\n\n def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):\n # Michael: doubling this function because NAF needs V'(s) not Q'(s), see comment below\n embedding = self.network.apply(x=states, internals=internals, update=update)\n\n # Both networks can use the same internals, could that be a problem?\n # Otherwise need to handle internals indices correctly everywhere\n target_embedding = self.target_network.apply(\n x=next_states,\n internals=next_internals,\n update=update\n )\n\n deltas = list()\n for name, distribution in self.distributions.items():\n target_distribution = self.target_distributions[name]\n\n distr_params = distribution.parameterize(x=embedding)\n target_distr_params = target_distribution.parameterize(x=target_embedding)\n\n q_value = self.tf_q_value(embedding=embedding, distr_params=distr_params, action=actions[name], name=name)\n\n # Notice, this is V', not Q' because NAF outputs V(s) separately\n next_state_value = target_distribution.state_value(distr_params=target_distr_params)\n\n delta = self.tf_q_delta(q_value=q_value, next_q_value=next_state_value, terminal=terminal, reward=reward)\n\n collapsed_size = util.prod(util.shape(delta)[1:])\n delta = tf.reshape(tensor=delta, shape=(-1, collapsed_size))\n\n deltas.append(delta)\n\n # Surrogate loss as the mean squared error between actual observed rewards and expected rewards\n loss_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=deltas, axis=1), axis=1)\n\n if self.huber_loss is not None and self.huber_loss > 0.0:\n return tf.where(\n condition=(tf.abs(x=loss_per_instance) <= self.huber_loss),\n x=(0.5 * tf.square(x=loss_per_instance)),\n y=(self.huber_loss * (tf.abs(x=loss_per_instance) - 0.5 * self.huber_loss))\n )\n else:\n return tf.square(x=loss_per_instance)\n\n def tf_regularization_losses(self, states, internals, update):\n losses = super(QNAFModel, self).tf_regularization_losses(\n states=states,\n internals=internals,\n update=update\n )\n\n for state_value in self.state_values.values():\n regularization_loss = state_value.regularization_loss()\n if regularization_loss is not None:\n if 'state-values' in losses:\n losses['state-values'] += regularization_loss\n else:\n losses['state-values'] = regularization_loss\n\n for l_entries in self.l_entries.values():\n regularization_loss = l_entries.regularization_loss()\n if regularization_loss is not None:\n if 'l-entries' in losses:\n losses['l-entries'] += regularization_loss\n else:\n losses['l-entries'] = regularization_loss\n\n return losses\n\n def get_variables(self, include_submodules=False, include_nontrainable=False):\n model_variables = super(QNAFModel, self).get_variables(\n 
include_submodules=include_submodules,\n include_nontrainable=include_nontrainable\n )\n\n state_values_variables = [\n variable for name in sorted(self.state_values)\n for variable in self.state_values[name].get_variables()\n ]\n model_variables += state_values_variables\n\n l_entries_variables = [\n variable for name in sorted(self.l_entries)\n for variable in self.l_entries[name].get_variables()\n ]\n model_variables += l_entries_variables\n\n return model_variables\n"
] |
[
[
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.exp",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.map_fn",
"tensorflow.square",
"tensorflow.pad",
"tensorflow.abs"
]
] |