Columns: repo_name (string, length 6–130), hexsha (list), file_path (list), code (list), apis (list)
jkulhanek/robot-visual-navigation
[ "ddc63df38d326e9225981bf89608043c77d950e8" ]
[ "python/environment/image_collection_environment.py" ]
[ "import os\nimport h5py\nimport gym\nfrom gym.wrappers import TimeLimit\nimport numpy as np\nimport random\n\nACTION_LIST = []\n\n\ndef move_position(position, mult=1):\n rotation = position[2]\n if rotation == 0:\n return (position[0] + 1 * mult, position[1], rotation)\n elif rotation == 1:\n return (position[0], position[1] + 1 * mult, rotation)\n elif rotation == 2:\n return (position[0] - 1 * mult, position[1], rotation)\n elif rotation == 3:\n return (position[0], position[1] - 1 * mult, rotation)\n\n\ndef compute_complexity_distance(pointa, pointb):\n ax, ay, ar = pointa\n bx, by, br = pointb\n return abs(ax - bx) + abs(ay - by) + abs((ar - br + 1) % 4 - 1) * 2\n\n\nclass ImageEnvironmentWrapper(gym.ObservationWrapper):\n def __init__(self, env):\n super(ImageEnvironmentWrapper, self).__init__(env)\n self._stds = [51.764749543249216, 51.764749543249216, 1064.4242973195394, 1064.4242973195394]\n self._means = [172.50841217557178, 172.50841217557178, 980.5952, 980.5952]\n self.observation_space = gym.spaces.Tuple(\n tuple([gym.spaces.Box(-1.0, 1.0, x.shape, dtype=np.float32) for x in self.observation_space.spaces]))\n\n def observation(self, observation):\n return [(o.astype(np.float32) - m) / s for o, m, s in zip(list(observation), self._means, self._stds)]\n\n\nclass ImageEnvironment(gym.Env):\n metadata = {'render.modes': ['rgb_array']}\n\n def __init__(self, screen_size=(84, 84), dataset_name='turtle_room', path=None, has_end_action=False, augment_images=True, **kwargs):\n super(ImageEnvironment, self).__init__(**kwargs)\n if path is None:\n path = (os.environ['DATASETS_PATH'] if 'DATASETS_PATH' in os.environ else os.path.expanduser(\n '~/datasets')) + '/%s/grid_compiled.hdf5' % dataset_name\n self.path = path\n self.has_end_action = has_end_action\n self._file = None\n self.augment_images = augment_images\n self._datasetSelector = \"%sx%s\" % screen_size\n\n height, width = screen_size\n self.action_space = gym.spaces.Discrete(4 if not has_end_action else 5)\n self.observation_space = gym.spaces.Tuple((\n gym.spaces.Box(0, 255, (height, width, 3), dtype=np.uint8),\n gym.spaces.Box(0, 225, (height, width, 3), dtype=np.uint8),\n gym.spaces.Box(0, 225, (height, width, 1), dtype=np.uint16),\n gym.spaces.Box(0, 225, (height, width, 1), dtype=np.uint16)))\n\n self._last_observation = None\n self._initialized = False\n self._random = random.Random()\n self._next_task = None\n self._physicalPosition = None\n self.complexity = None\n\n def _initialize(self):\n if self._initialized:\n return False\n if self._file is None:\n self._file = h5py.File(self.path, 'r')\n self._positions = self._file[\"grid\"]\n self._physicalPositions = self._file[\"positions\"]\n self._images = self._file[self._datasetSelector + (\"/augmented_images\" if self.augment_images else \"/images\")]\n self._depths = self._file[self._datasetSelector + (\"/augmented_depths\" if self.augment_images else \"/depths\")]\n wid, hei, _, nsamples = self._positions.shape\n self._allowedPoints = set()\n self._goalPoints = []\n self._nongoalPoints = []\n for x in range(wid):\n for y in range(hei):\n isany = all(any(self._positions[x, y, r, i] != -1 for i in range(nsamples)) for r in range(4))\n if isany:\n self._allowedPoints.add((x, y))\n\n for (x, y) in self._allowedPoints:\n for r in range(4):\n xn, yn, _ = move_position((x, y, r))\n if (xn, yn) not in self._allowedPoints:\n self._goalPoints.append((x, y, r))\n else:\n self._nongoalPoints.append((x, y, r))\n self._initialized = True\n\n # Prepare complexity lookup\n 
self._complexityCache = dict()\n for g in self._goalPoints:\n cmpCache = []\n self._complexityCache[g] = cmpCache\n for i in range(wid + hei + 4 + 1):\n startPoints = []\n cmpCache.append(startPoints)\n for x in self._nongoalPoints:\n dist = compute_complexity_distance(g, x)\n if dist == i:\n startPoints.append(x)\n\n def set_complexity(self, complexity=None):\n self.complexity = complexity\n\n def step(self, action):\n assert self._initialized\n self._position, collided = self._move(self._position, action)\n terminal = self.is_goal(self._position)\n reward = 1.0 if terminal else (-0.01 if collided else 0)\n if self.has_end_action:\n if action == 4:\n if terminal:\n reward = 1.0\n terminal = True\n else:\n reward = 0\n terminal = True\n else:\n reward = (-0.01 if collided else 0)\n terminal = False\n obs = None if terminal else self._observe()\n self._last_observation = obs if obs is not None else tuple([np.copy(x) for x in list(self._last_observation)])\n return self._last_observation, reward, terminal, dict()\n\n def _ensure_in_grid(self, position):\n x, y, _ = position\n return (x, y) in self._allowedPoints\n\n def _move(self, position, action):\n x, y, r = position\n if action == 0:\n # Forward\n npos = move_position(position, 1)\n if self._ensure_in_grid(npos):\n return npos, False\n else:\n return position, True\n elif action == 1:\n # Backward\n npos = move_position(position, -1)\n if self._ensure_in_grid(npos):\n return npos, False\n else:\n return position, True\n elif action == 2:\n # Left\n npos = (x, y, (r + 1) % 4)\n return npos, True\n elif action == 3:\n # Right\n npos = (x, y, (r - 1) % 4)\n return npos, True\n else:\n return position, False\n\n def _observe(self):\n x, y, r = self._position\n index = self._positions[x, y, r, self._random.randrange((self._positions[x, y, r] != -1).sum())]\n self._physicalPosition = (self._physicalPositions[index], self._physicalPositions[self._goalIndex])\n indexg = self._goalIndex\n if self.augment_images:\n irender = self._random.randrange(self._images.shape[1])\n return (\n self._images[index, irender, ...],\n self._images[indexg, self._goalRender, ...],\n np.expand_dims(self._depths[index, irender, ...], 2),\n np.expand_dims(self._depths[indexg, self._goalRender, ...], 2)\n )\n else:\n return (\n self._images[index, ...],\n self._images[indexg, ...],\n np.expand_dims(self._depths[index, ...], 2),\n np.expand_dims(self._depths[indexg, ...], 2)\n )\n\n def set_next_task(self, position, goal):\n self._next_task = (position, goal)\n\n @property\n def position(self):\n c, g = self._physicalPosition\n return list(c[:3]), list(g[:3])\n\n def reset(self):\n self._initialize()\n # Sample goal\n self._goal = self.sample_goal() if self._next_task is None else self._next_task[1]\n xg, yg, rg = self._goal\n self._goalIndex = self._positions[xg, yg, rg, self._random.randrange((self._positions[xg, yg, rg] != -1).sum())]\n if self.augment_images:\n self._goalRender = self._random.randrange(self._images.shape[1])\n self._position = self.sample_position(self._goal) if self._next_task is None else self._next_task[0]\n self._last_observation = self._observe()\n self._next_task = None\n return self._last_observation\n\n def sample_goal(self):\n # Sample a goal on the edge of the grid\n return self._random.choice(self._goalPoints)\n\n def sample_position(self, goal):\n choiceArray = None\n if self.complexity is None:\n choiceArray = self._nongoalPoints\n else:\n choiceArray = []\n for i in range(min(len(self._complexityCache[goal]), self.complexity + 1)):\n 
choiceArray.extend(self._complexityCache[goal][i])\n\n return self._random.choice(choiceArray)\n\n def is_goal(self, position):\n diff = abs(self._goal[0] - position[0]) + abs(self._goal[1] - position[1])\n return diff <= 1 and self._goal[2] == position[2]\n\n def seed(self, seed=None):\n self._random.seed(seed)\n\n def close(self):\n if self._file is not None:\n self._file.close()\n self._file = None\n self._initialized = False\n\n def browse(self):\n from .browser import GoalKeyboardAgent\n agent = GoalKeyboardAgent(self, [0, 1, 2, 3])\n agent.show()\n\n def render(self, mode='rgb_array', close=False):\n if mode == 'rgb_array':\n return self._last_observation[0]\n # elif mode is 'human':\n # pop up a window and render\n else:\n super(ImageEnvironment, self).render(mode=mode) # just raise an exception\n\n\nif __name__ == \"__main__\":\n from PIL import Image\n import matplotlib\n matplotlib.use(\"TkAgg\")\n import matplotlib.pyplot as plt\n\n w = TimeLimit(ImageEnvironment(), max_episode_steps=300)\n i = Image.open(\"../assets/images/63.png\")\n plt.imshow(w._image_aug(images=[np.array(i)])[0])\n plt.show()\n" ]
[ [ "numpy.expand_dims", "matplotlib.use", "numpy.copy", "numpy.array", "matplotlib.pyplot.show" ] ]
am1tyadav/teal
[ "6599b937c03934f3ef362cb16550fbe31336f57b" ]
[ "teal/mel_to.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom teal.utils import get_mel_filter_bank\n\n\nclass MelSpecToSpectrogram(layers.Layer):\n def __init__(self, sample_rate: int, n_fft: int, n_mels: int, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._sample_rate = sample_rate\n self._n_fft = n_fft\n self._n_mels = n_mels\n self._lin_to_mel_matrix = None\n\n def build(self, input_shape):\n self._lin_to_mel_matrix = get_mel_filter_bank(\n self._sample_rate, self._n_fft, self._n_mels\n )\n\n def call(self, inputs, *args, **kwargs):\n return tf.matmul(inputs, tf.transpose(self._lin_to_mel_matrix))\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"_sample_rate\": self._sample_rate,\n \"_n_fft\": self._n_fft,\n \"_n_mels\": self._n_mels,\n }\n )\n return config\n" ]
[ [ "tensorflow.transpose" ] ]
Zhi-ChaoZhao/NRSurMemory_7qd4
[ "0f49530e7602f136f99879fbb4c9bb6e2f48cdc0" ]
[ "surfinBH/_fit_evaluators/fit_Mem_7qd4.py" ]
[ "import numpy as np\nimport sys\nfrom scipy.interpolate import InterpolatedUnivariateSpline as spline\nfrom surfinBH import surfinBH\nfrom surfinBH._lal_spin_evolution import lal_spin_evloution_wrapper\nimport surfinBH._utils as utils\nimport warnings\n\n#=============================================================================\nclass Mem_Fit7dq4(surfinBH.SurFinBH):\n \"\"\" A class for the NRSur7dq4Remnant model presented in Zhi-Chao Zhao, Xiaolin Liu, Zhoujian Cao, et al., [arxiv:ToBePublished], hereafter referred to as THE PAPER.\n\n This model predicts the final memory(log10(memory) and its GPR error) , for the remnants of precessing\n binary black hole systems. The fits are done using Gaussian Process\n Regression (GPR) and also provide an error estimate along with the fit\n value.\n\n This model has been trained in the parameter space:\n q <= 4, |chiA| <= 0.8, |chiB| <= 0.8\n\n However, it extrapolates reasonably to:\n q <= 6, |chiA| <= 1, |chiB| <= 1\n\n =========================================================================\n Usage:\n\n import surfinBH\n\n # Load the fit\n fit = surfinBH.LoadFits('NRSur7dq4Remnant')\n\n We provide the following call methods:\n # remnant log10memory and 1-sigma error estimate\n log10memory, log10memory_err = fit.memory(q, chiA, chiB, **kwargs)\n\n The arguments for each of these call methods are as follows:\n Arguments:\n q: Mass ratio (q = mA/mB >= 1)\n\n chiA: Dimensionless spin vector of the heavier black hole at\n reference epoch.\n chiB: Dimensionless spin vector of the lighter black hole at\n reference epoch.\n\n This follows the same convention as LAL, where the spin\n components are defined as:\n \\chi_z = \\chi \\cdot \\hat{L}, where L is the orbital angular\n momentum vector at the epoch.\n \\chi_x = \\chi \\cdot \\hat{n}, where n = body2 -> body1 is the\n separation vector at the epoch. body1 is the heavier body.\n \\chi_y = \\chi \\cdot \\hat{L \\cross n}.\n These spin components are frame-independent as they are defined\n using vector inner products. This is equivalent to specifying\n the spins in the coorbital frame at the reference epoch. See\n THE PAPER for a definition of the coorbital frame.\n\n\n Optional arguments:\n\n omega0: Orbital frequency used to set the reference epoch.\n Default: None.\n\n If omega0 is None, the reference epoch is assumed to be at\n t=-100 M from the peak of the waveform, see THE PAPER for\n definition of the peak.\n\n If 'omega0' is given, the reference epoch is take to be the\n time at which the orbital frequency in the coprecessing frame\n equals omega0. omega0 should be in dimensionless units of\n rad/M, where M is the total mass.\n\n See THE PAPER for how the orbital frequency is\n computed as well as the definition of the coprecessing frame.\n\n allow_extrap:\n If False, raises a warning when q > 4.1 or |chiA|,|chiB| > 0.81,\n and raises an error when q > 6.1 or |chiA|,|chiB| > 1.\n If True, allows extrapolation to any q and |chiA|,|chiB| <= 1.\n Use at your own risk.\n Default: False.\n\n Optional PN evolution arguments:\n\n If the omega0 option is used, the spins need to be evolved from omega0\n until t=-100M, where the fits will be evaluated. For the late inspiral\n part, we use the internal spin evolution of NRSur7dq4 (also described\n in THE PAPER), which is very accurate. However, this surrogate is not\n long enough for small values of omega0 as it only has data starting at\n t=-4300M. 
Therefore, whenever the input omega0 is smaller than\n omega_switch_IG (defined below), we use PN evolution to go from omega0\n to about t=-4300M, beyond which we use NRSur7dq4 for spin evolution.\n\n PN_approximant:\n Approximant used to do the PN spin evolution. Choose from\n 'SpinTaylorT4', 'SpinTaylorT1' or 'SpinTaylorT5'.\n Default: 'SpinTaylorT4'.\n\n PN_dt:\n Dimensionless time step size in units of M, used for the PN\n evolution. You may need to increase this if omega0 is very low.\n Default: 0.1\n\n PN_spin_order:\n Twice the PN order of spin effects. E.g., use 7 for 3.5PN.\n Default: 7\n\n PN_phase_order:\n Twice the PN order in phase. E.g., use 7 for 3.5PN.\n Default: 7\n\n t_sur_switch:\n The dimensionless time (from the peak) at which we switch from PN\n to the surrogate. Should be something larger than -4300.\n Default: -4000.\n\n omega_switch_IG:\n Initial guess for dimensionless orbital frequency, using which the\n switch will be made from PN to NRSur7dq4. This should be large\n enough to work for generic parts of the surrogate parameter space.\n You may need to increase this if the NRSur7dq4 model raises an\n exception like: \"Got omega_ref=0.03 < 0.031=omega_0, too small!\"\n Default: 0.03\n\n How t_sur_switch and omega_switch_IG work: The PN data is first\n generated starting at omega0, then the PN spins at omega_switch_IG\n are used to generate the NRSur7dq4 dynamics. NRSur7dq4 integrate\n the dynamics both forwards and backwards, so it will have omega and\n spins as a time series starting from -4300M. This is used to pick\n the omega0_sur and spins at t_sur_switch. Then the surrogate\n is reevaluated using omega0_sur and spins at t_sur_switch, thus\n ensuring that the switch always happens at t_sur_switch, even if\n omega_switch_IG corresponds to a later time.\n\n Inertial frame for returned values:\n\n Note that the default reference epoch corresponds to t=-100M, but\n if omega0 is given the reference epoch is taken to be the time at\n which the orbital frequency in the coprecessing frame is equal to\n omega0. This agrees with the LAL convention. See LIGO DCC document\n T1800226 for the LAL frame diagram.\n \"\"\"\n\n #-------------------------------------------------------------------------\n def __init__(self, name, load_nrsur=False):\n\n # Param limits beyond which to raise a warning\n soft_param_lims = {\n 'q': 4.1,\n 'chiAmag': 0.81,\n 'chiBmag': 0.81,\n }\n\n # Param limits beyond which to raise an error\n hard_param_lims = {\n 'q': 6.1,\n 'chiAmag': 1,\n 'chiBmag': 1,\n }\n\n super(Mem_Fit7dq4, self).__init__(name, soft_param_lims, hard_param_lims)\n self.nrsur = None\n self.fitnode_time = -100 # Time at which the fits are constructed\n\n #-------------------------------------------------------------------------\n def _load_NRSur7dq4(self):\n import gwsurrogate\n from gwsurrogate.new.precessing_surrogate import splinterp_many\n self.nrsur = gwsurrogate.LoadSurrogate('NRSur7dq4')\n self.splinterp_many = splinterp_many\n\n #-------------------------------------------------------------------------\n def _load_fits(self, h5file):\n \"\"\" Loads fits from h5file and returns a dictionary of fits. 
\"\"\"\n fits = {}\n for key in ['memory']:\n fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file)\n return fits\n\n #-------------------------------------------------------------------------\n def _extra_regression_kwargs(self):\n \"\"\" List of additional kwargs to use in regression tests.\n \"\"\"\n\n extra_args = []\n extra_args.append({\n 'omega0': 5e-3,\n 'PN_approximant': 'SpinTaylorT4',\n 'PN_dt': 0.1,\n 'PN_spin_order': 7,\n 'PN_phase_order': 7,\n })\n\n\n extra_args.append({\n 'omega0': 6e-3,\n 'PN_approximant': 'SpinTaylorT1',\n 'PN_dt': 0.5,\n 'PN_spin_order': 5,\n 'PN_phase_order': 7,\n })\n\n extra_args.append({\n 'omega0': 7e-3,\n 'PN_approximant': 'SpinTaylorT5',\n 'PN_dt': 1,\n 'PN_spin_order': 7,\n 'PN_phase_order': 5,\n })\n\n # These should be pure NRSur7dq4\n extra_args.append({'omega0': 3e-2})\n extra_args.append({'omega0': 5e-2})\n\n return extra_args\n\n #-------------------------------------------------------------------------\n def _get_fit_params(self, x, fit_key):\n \"\"\" Transforms the input parameter to fit parameters for the 7dq4 model.\n That is, maps from\n x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]\n fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]\n\n chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead\n of chiA and chiB.\n chi_a = (chiAz - chiBz)/2.\n \"\"\"\n q, chiAz, chiBz = x[0], x[3], x[6]\n eta = q/(1.+q)**2\n chi_wtAvg = (q*chiAz+chiBz)/(1.+q)\n chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.)\n chi_a = (chiAz - chiBz)/2.\n\n fit_params = [np.log(q), x[1], x[2], chiHat, x[4], x[5], chi_a]\n return fit_params\n\n def _get_coorbital_frame_spins_at_idx(self, chiA, chiB, omega, lNhat, phi, \\\n idx):\n \"\"\" Computes PN spins and dynamics at a given idx.\n\n Inputs:\n chiA: Dimless spin evolution of BhA in inertial frame.\n chiB: Dimless spin evolution of BhB in inertial frame.\n omega: Orbital frequency evolution in dimless units.\n lNhat: Orbital angular momentum direction evolution.\n phi: Orbital phase evolution.\n idx: Index for output.\n\n Outputs (all are time series):\n chiA_at_idx_coorb: Spin of BhA at idx, in coorbital frame.\n chiB_at_idx_coorb: Spin of BhB at idx, in coorbital frame.\n quat_copr_at_idx: Coprecessing frame quaternion at idx.\n phi_at_idx: Orbital phase in the coprecessing frame at idx.\n omega_at_idx Dimensionless orbital frequency at idx.\n\n The inertial frame is assumed to be aligned to the coorbital frame at\n the first index.\n \"\"\"\n\n # Compute omega, inertial spins, angular momentum direction and orbital\n # phase at idx\n omega_at_idx = omega[idx]\n chiA_at_idx = chiA[idx]\n chiB_at_idx = chiB[idx]\n lNhat_at_idx = lNhat[idx]\n phi_at_idx = phi[idx]\n\n # Align the z-direction along orbital angular momentum direction\n # at idx. 
This moves us in to the coprecessing frame.\n quat_copr_at_idx = utils.alignVec_quat(lNhat_at_idx)\n chiA_at_idx_copr = utils.transformTimeDependentVector(\n np.array([quat_copr_at_idx]).T,\n np.array([chiA_at_idx]).T, inverse=1).T[0]\n chiB_at_idx_copr = utils.transformTimeDependentVector(\n np.array([quat_copr_at_idx]).T,\n np.array([chiB_at_idx]).T, inverse=1).T[0]\n\n # get coorbital frame spins at idx\n chiA_at_idx_coorb = utils.rotate_in_plane(chiA_at_idx_copr, phi_at_idx)\n chiB_at_idx_coorb = utils.rotate_in_plane(chiB_at_idx_copr, phi_at_idx)\n\n return chiA_at_idx_coorb, chiB_at_idx_coorb, quat_copr_at_idx, \\\n phi_at_idx, omega_at_idx\n\n def _get_PN_spins_at_surrogate_start(self, PN_approximant, q, omega0, \\\n chiA0, chiB0, PN_dt, PN_spin_order, PN_phase_order, \\\n omega_switch_IG, t_sur_switch):\n \"\"\" Computes PN spins and frame dynamics at a time close to the start\n of the surrogate waveform model.\n\n Generates PN spins and frame quantities using spins at omega0.\n Then uses the PN spins at omega_switch_IG to generate the surrogate\n dynamics.\n Then gets the surrogate orbital frequency at t_sur_switch, let's\n call this omega_init_sur.\n Then use the PN spins at omega_init_sur to regenerate the surrogate\n dynamics.\n \"\"\"\n\n # Get PN spin evolution starting at omega0\n omega_PN, phi_PN, chiA_PN, chiB_PN, lNhat_PN, e1_PN \\\n = lal_spin_evloution_wrapper(PN_approximant, q, omega0, \\\n chiA0, chiB0, PN_dt, PN_spin_order, PN_phase_order)\n\n # Get PN coorbital frame spins and frame dynamics at\n # omega_PN=omega_switch_IG\n idx = np.argmin(np.abs(omega_PN - omega_switch_IG))\n chiA_PN_at_idx_coorb, chiB_PN_at_idx_coorb, quat_PN_copr_at_idx, \\\n phi_PN_at_idx, omega_PN_at_idx \\\n = self._get_coorbital_frame_spins_at_idx(chiA_PN, chiB_PN, \\\n omega_PN, lNhat_PN, phi_PN, idx)\n\n # Now evaluate the surrogate dynamics (both forwards and backwards)\n # using PN spins at omega_switch_IG\n quat_sur, orbphase_sur, chiA_copr_sur, chiB_copr_sur \\\n = self.nrsur._sur_dimless.get_dynamics(q, chiA_PN_at_idx_coorb, \\\n chiB_PN_at_idx_coorb, init_quat=quat_PN_copr_at_idx,\n init_orbphase=phi_PN_at_idx, omega_ref=omega_switch_IG)\n dyn_times = self.nrsur._sur_dimless.tds\n omega_sur = np.gradient(orbphase_sur, dyn_times)\n\n # Get surrogate orbital frequency at t_sur_switch, which is\n # close to the start of the surrogate data\n omega_init_sur = omega_sur[np.argmin(np.abs( \\\n dyn_times - t_sur_switch))]\n\n # Get PN coorbital frame spins and frame dynamics at omega_init_sur\n idx = np.argmin(np.abs(omega_PN - omega_init_sur))\n chiA_PN_at_idx_coorb, chiB_PN_at_idx_coorb, quat_PN_copr_at_idx, \\\n phi_PN_at_idx, omega_PN_at_idx \\\n = self._get_coorbital_frame_spins_at_idx(chiA_PN, chiB_PN, \\\n omega_PN, lNhat_PN, phi_PN, idx)\n\n return chiA_PN_at_idx_coorb, chiB_PN_at_idx_coorb, \\\n quat_PN_copr_at_idx, phi_PN_at_idx, omega_PN_at_idx, \\\n chiA_PN, chiB_PN, omega_PN\n\n\n #-------------------------------------------------------------------------\n def _evolve_spins(self, q, chiA0, chiB0, omega0, \\\n return_spin_evolution=False, **kwargs):\n \"\"\" Evolves spins of the component BHs from an initial orbital\n frequency = omega0 until t=-100 M from the peak of the waveform. If\n omega0 < omega_switch_IG, use PN to evolve the spins until\n t=t_sur_switch. 
Then evolves further with the NRSur7dq4 waveform model\n until t=-100M from the peak.\n\n Returns spins in the coorbital frame at t=-100M, as well as the\n coprecessing frame quaternion and orbital phase in the coprecessing\n frame at this time.\n\n If return_spin_evolution is given, also returns the PN and surrogate\n spin times series.\n \"\"\"\n\n PN_approximant = kwargs.pop('PN_approximant', 'SpinTaylorT4')\n PN_dt = kwargs.pop('PN_dt', 0.1)\n PN_spin_order = kwargs.pop('PN_spin_order', 7)\n PN_phase_order = kwargs.pop('PN_phase_order', 7)\n # Initial guess for surrogate omega0, this should be large enough for\n # all q=6 cases\n omega_switch_IG = kwargs.pop('omega_switch_IG', 0.03)\n # The surrogate begins at -4300, use -4000 to be safe\n t_sur_switch = kwargs.pop('t_sur_switch', -4000)\n self._check_unused_kwargs(kwargs)\n\n # Load NRSur7dq4 if not previously loaded\n if self.nrsur is None:\n self._load_NRSur7dq4()\n\n # If omega0 is below the NRSur7dq4 initial guess frequency, we use PN\n # to evolve the spins. We get the initial spins and omega_init_sur such\n # that should go into the surrogate such that the inital time is\n # t_sur_switch.\n if omega0 < omega_switch_IG:\n chiA0_nrsur_coorb, chiB0_nrsur_coorb, quat0_nrsur_copr, \\\n phi0_nrsur, omega_init_sur, chiA_PN, chiB_PN, omega_PN \\\n = self._get_PN_spins_at_surrogate_start(PN_approximant, q, \\\n omega0, chiA0, chiB0, PN_dt, PN_spin_order, PN_phase_order, \\\n omega_switch_IG, t_sur_switch)\n\n # If omega0 >= omega_switch_IG, we evolve spins directly with NRSur7dq4\n # waveform model. We set the coprecessing frame quaternion to identity\n # and orbital phase to 0 at omega=omega0, hence the coprecessing frame\n # is the same as the inertial frame here.\n else:\n # Note that here we set omega_init_sur to omega0\n chiA0_nrsur_coorb, chiB0_nrsur_coorb, quat0_nrsur_copr, \\\n phi0_nrsur, omega_init_sur, chiA_PN, chiB_PN, omega_PN \\\n = chiA0, chiB0, [1,0,0,0], 0, omega0, None, None, None\n\n # Now evaluate the surrogate dynamics using PN spins at omega_init_sur\n quat_sur, orbphase_sur, chiA_copr_sur, chiB_copr_sur \\\n = self.nrsur._sur_dimless.get_dynamics(q, chiA0_nrsur_coorb, \\\n chiB0_nrsur_coorb, init_quat=quat0_nrsur_copr,\n init_orbphase=phi0_nrsur, omega_ref=omega_init_sur)\n\n # get data at time node where remnant fits are done\n dyn_times = self.nrsur._sur_dimless.tds\n nodeIdx = np.argmin(np.abs(dyn_times - self.fitnode_time))\n quat_fitnode = quat_sur.T[nodeIdx]\n orbphase_fitnode = orbphase_sur[nodeIdx]\n\n # get coorbital frame spins at the time node\n chiA_coorb_fitnode = utils.rotate_in_plane(chiA_copr_sur[nodeIdx],\n orbphase_fitnode)\n chiB_coorb_fitnode = utils.rotate_in_plane(chiB_copr_sur[nodeIdx],\n orbphase_fitnode)\n\n if return_spin_evolution:\n # Transform spins to the reference inertial frame\n chiA_inertial_sur = utils.transformTimeDependentVector(quat_sur, \\\n chiA_copr_sur.T).T\n chiB_inertial_sur = utils.transformTimeDependentVector(quat_sur, \\\n chiB_copr_sur.T).T\n spin_evolution = {\n 't_sur': dyn_times,\n 'chiA_sur': chiA_inertial_sur,\n 'chiB_sur': chiB_inertial_sur,\n 'orbphase_sur': orbphase_sur,\n 'quat_sur': quat_sur,\n 'omega_PN': omega_PN,\n 'chiA_PN': chiA_PN,\n 'chiB_PN': chiB_PN,\n 'omega_init_sur': omega_init_sur,\n }\n else:\n spin_evolution = None\n\n return chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \\\n orbphase_fitnode, spin_evolution\n\n #-------------------------------------------------------------------------\n def _eval_wrapper(self, fit_key, q, chiA, chiB, 
**kwargs):\n \"\"\"Evaluates the NRSur7dq4Remnant model.\n \"\"\"\n chiA = np.array(chiA)\n chiB = np.array(chiB)\n\n # Warn/Exit if extrapolating\n allow_extrap = kwargs.pop('allow_extrap', False)\n omega0 = kwargs.pop('omega0', None)\n self._check_param_limits(q, chiA, chiB, allow_extrap)\n\n if omega0 is None:\n # If omega0 is given, assume chiA, chiB are the coorbital frame\n # spins at t=-100 M.\n x = np.concatenate(([q], chiA, chiB))\n else:\n # If omega0 is given, evolve the spins from omega0\n # to t = -100 M from the peak.\n chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \\\n orbphase_fitnode, _ \\\n = self._evolve_spins(q, chiA, chiB, omega0, **kwargs)\n\n # x should contain coorbital frame spins at t=-100M\n x = np.concatenate(([q], chiA_coorb_fitnode, chiB_coorb_fitnode))\n\n if fit_key == 'memory' or fit_key == 'all':\n memory, memory_err = self._evaluate_fits(x, 'memory')\n if fit_key == 'memory':\n return memory, memory_err\n\n\n if fit_key == 'all':\n return memory, memory_err" ]
[ [ "numpy.log", "numpy.abs", "numpy.gradient", "numpy.concatenate", "numpy.array" ] ]
benyoussefrihab/ProjectKeras-1
[ "9542e5adeb509b78123d684af8cf320395ee3bf9" ]
[ "feature_extractor.py" ]
[ "from tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.vgg16 import VGG16, preprocess_input\nfrom tensorflow.keras.models import Model\nimport numpy as np\n\n# See https://keras.io/api/applications/ for details\n\nclass FeatureExtractor:\n def __init__(self):\n base_model = VGG16(weights='imagenet')\n self.model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)\n\n def extract(self, img):\n \"\"\"\n Extract a deep feature from an input image\n Args:\n img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)\n\n Returns:\n feature (np.ndarray): deep feature with the shape=(4096, )\n \"\"\"\n img = img.resize((224, 224)) # VGG must take a 224x224 img as an input\n img = img.convert('RGB') # Make sure img is color\n x = image.img_to_array(img) # To np.array. Height x Width x Channel. dtype=float32\n x = np.expand_dims(x, axis=0) # (H, W, C)->(1, H, W, C), where the first elem is the number of img\n x = preprocess_input(x) # Subtracting avg values for each pixel\n feature = self.model.predict(x)[0] # (1, 4096) -> (4096, )\n return feature / np.linalg.norm(feature) # Normalize\n\n" ]
[ [ "numpy.expand_dims", "tensorflow.keras.applications.vgg16.VGG16", "numpy.linalg.norm", "tensorflow.keras.applications.vgg16.preprocess_input", "tensorflow.keras.preprocessing.image.img_to_array" ] ]
GenieTim/mpltools
[ "9d7f5c5a704357f34a72802712b0261566b8dbc1" ]
[ "mpltools/special/hinton.py" ]
[ "from __future__ import division\nfrom future.builtins import zip\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections\nfrom matplotlib import transforms\nfrom matplotlib import ticker\n\n__all__ = ['hinton']\n\n\n# TODO: Add yutils.mpl._coll to mpltools and use that for square collection.\nclass SquareCollection(collections.RegularPolyCollection):\n \"\"\"Return a collection of squares.\"\"\"\n\n def __init__(self, **kwargs):\n super(SquareCollection, self).__init__(4, rotation=np.pi/4., **kwargs)\n\n def get_transform(self):\n \"\"\"Return transform scaling circle areas to data space.\"\"\"\n ax = self.axes\n pts2pixels = 72.0 / ax.figure.dpi\n scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width\n scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height\n return transforms.Affine2D().scale(scale_x, scale_y)\n\n\ndef hinton(inarray, max_value=None, use_default_ticks=True):\n \"\"\"Plot Hinton diagram for visualizing the values of a 2D array.\n\n Plot representation of an array with positive and negative values\n represented by white and black squares, respectively. The size of each\n square represents the magnitude of each value.\n\n Unlike the hinton demo in the matplotlib gallery [1]_, this implementation\n uses a RegularPolyCollection to draw squares, which is much more efficient\n than drawing individual Rectangles.\n\n .. note::\n This function inverts the y-axis to match the origin for arrays.\n\n .. [1] http://matplotlib.sourceforge.net/examples/api/hinton_demo.html\n\n Parameters\n ----------\n inarray : array\n Array to plot.\n max_value : float\n Any *absolute* value larger than `max_value` will be represented by a\n unit square.\n use_default_ticks: boolean\n Disable tick-generation and generate them outside this function.\n \"\"\"\n\n ax = plt.gca()\n ax.set_axis_bgcolor('gray')\n # make sure we're working with a numpy array, not a numpy matrix\n inarray = np.asarray(inarray)\n height, width = inarray.shape\n if max_value is None:\n max_value = 2**np.ceil(np.log(np.max(np.abs(inarray)))/np.log(2))\n values = np.clip(inarray/max_value, -1, 1)\n rows, cols = np.mgrid[:height, :width]\n\n pos = np.where(values > 0)\n neg = np.where(values < 0)\n for idx, color in zip([pos, neg], ['white', 'black']):\n if len(idx[0]) > 0:\n xy = list(zip(cols[idx], rows[idx]))\n circle_areas = np.pi / 2 * np.abs(values[idx])\n squares = SquareCollection(sizes=circle_areas,\n offsets=xy, transOffset=ax.transData,\n facecolor=color, edgecolor=color)\n ax.add_collection(squares, autolim=True)\n\n ax.axis('scaled')\n # set data limits instead of using xlim, ylim.\n ax.set_xlim(-0.5, width-0.5)\n ax.set_ylim(height-0.5, -0.5)\n\n if use_default_ticks:\n ax.xaxis.set_major_locator(IndexLocator())\n ax.yaxis.set_major_locator(IndexLocator())\n\n\nclass IndexLocator(ticker.Locator):\n\n def __init__(self, max_ticks=10):\n self.max_ticks = max_ticks\n\n def __call__(self):\n \"\"\"Return the locations of the ticks.\"\"\"\n dmin, dmax = self.axis.get_data_interval()\n if dmax < self.max_ticks:\n step = 1\n else:\n step = np.ceil(dmax / self.max_ticks)\n return self.raise_if_exceeds(np.arange(0, dmax, step))\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.log", "numpy.abs", "numpy.clip", "numpy.asarray", "numpy.arange", "matplotlib.transforms.Affine2D", "numpy.ceil", "numpy.where" ] ]
ivy-dl/vision
[ "ab7f188bee028af5880b4b5aa76761ea61e7ea4f" ]
[ "ivy_vision_tests/test_voxel_grids.py" ]
[ "# global\nimport pytest\nimport ivy.numpy\nimport numpy as np\nimport ivy_tests.helpers as helpers\n\n# local\nimport ivy_vision.voxel_grids as ivy_vg\nfrom ivy_vision_tests.data import TestData\n\n\nclass VoxelGridsTestData(TestData):\n\n def __init__(self):\n super().__init__()\n\n # un-batched\n self.simple_world_coords = np.array([[[0., 1.5, 3., 1.],\n [1.9, 0., 0.9, 1.]],\n [[0.7, 3., 0., 1.],\n [3., 0.8, 1.9, 1.]]])\n self.simple_world_coords_flat = np.reshape(self.simple_world_coords, (4, 4))\n self.simple_world_features_flat = np.ones((4, 1))\n self.simple_voxel_grid_dims = (3, 3, 3)\n self.simple_voxel_grid =\\\n np.array([[[0, 0, 0], [0, 0, 1], [1, 0, 0]],\n [[1, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 1, 0], [0, 0, 0], [0, 0, 0]]])\n self.simple_voxel_grid_m1_4_bounded =\\\n np.array([[[0, 0, 0], [0, 0, 1], [0, 0, 0]],\n [[0, 1, 0], [0, 0, 0], [1, 0, 0]],\n [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])\n\n self.simple_voxel_grid_0_4_bounded = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 1, 0], [0, 0, 0], [0, 0, 0]]])\n\n # batched\n self.simple_world_coords_batched =\\\n np.array([[[[0.5, 1.5, 2.6, 1.],\n [1.9, 0.7, 0.9, 1.]],\n [[0.7, 2.9, 0.7, 1.],\n [2.4, 0.8, 1.9, 1.]]],\n\n [[[1., 1., 1., 1],\n [1., 1., 2., 1]],\n [[1., 2., 1., 1],\n [1., 2., 2., 1]]]])\n self.simple_world_coords_batched_flat = np.reshape(self.simple_world_coords_batched, (2, 4, 4))\n\n self.simple_voxel_grid_batched = np.array([[[[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[1, 0, 1], [0, 0, 0], [0, 0, 0]]],\n\n [[[1, 0, 1], [0, 0, 0], [1, 0, 1]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]])\n\n # world coords\n self.world_coords_flat = np.reshape(self.world_coords, (1, 2, 480*640, 4))\n\n\ntd = VoxelGridsTestData()\n\n\ndef test_world_coords_to_bounding_voxel_grid(dev_str, call):\n if call in [helpers.tf_graph_call]:\n # the need to dynamically infer array shapes for scatter makes this only valid in eager mode currently\n pytest.skip()\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_flat, np.array([3, 3, 3]))[0], -1) > 0,\n td.simple_voxel_grid, atol=1e-6)\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_flat, (1, 1, 1), 'RES')[0], -1) > 0,\n td.simple_voxel_grid, atol=1e-6)\n\n # with coord bounds\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_flat, (3, 3, 3),\n coord_bounds=[-1]*3 + [4]*3)[0], -1) > 0, td.simple_voxel_grid_m1_4_bounded, atol=1e-6)\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_flat, (3, 3, 3),\n coord_bounds=[0]*3 + [4]*3)[0], -1) > 0, td.simple_voxel_grid_0_4_bounded, atol=1e-6)\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_batched_flat, (3, 3, 3),\n coord_bounds=[0.5]*3 + [2.5]*3)[0], -1) > 0, td.simple_voxel_grid_batched, atol=1e-6)\n\n # with features\n assert np.allclose(\n call(ivy_vg.coords_to_voxel_grid, td.simple_world_coords_flat, (3, 3, 3),\n features=td.simple_world_features_flat)[0][..., 3], td.simple_voxel_grid, atol=1e-6)\n\n # with multi-dimensions\n if call in [helpers.mx_call]:\n # MXNet cannot slice arrays with more than 6 dimensions\n return\n with ivy.numpy.use:\n target = np.sum(ivy_vg.coords_to_voxel_grid(td.world_coords_flat, (32, 32, 32), 'DIMS')[0], -1) > 0\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.world_coords_flat, (32, 32, 32), 'DIMS')[0], 
-1) > 0, target, atol=1e-6)\n with ivy.numpy.use:\n target = np.sum(ivy_vg.coords_to_voxel_grid(td.world_coords_flat, (0.1, 0.1, 0.1), 'RES')[0], -1) > 0\n assert np.allclose(np.sum(\n call(ivy_vg.coords_to_voxel_grid, td.world_coords_flat, (0.1, 0.1, 0.1), 'RES')[0], -1) > 0, target, atol=1e-6)\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.ones" ] ]
DanielScottEaton/TrenchRipper
[ "e8bcadb3130b8215eeb9f32ec38ede1c031af47d" ]
[ "trenchripper/tplot.py" ]
[ "# fmt: off\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef plot_kymograph(kymograph):\n \"\"\"Helper function for plotting kymographs. Takes a kymograph array of\n shape (y_dim,x_dim,t_dim).\n\n Args:\n kymograph (array): kymograph array of shape (y_dim,x_dim,t_dim).\n \"\"\"\n list_in_t = [kymograph[t,:,:] for t in range(kymograph.shape[0])]\n img_arr = np.concatenate(list_in_t,axis=1)\n plt.imshow(img_arr)\n" ]
[ [ "numpy.concatenate", "matplotlib.pyplot.imshow" ] ]
hf136/models
[ "5330c8763bf6b83907fac01ddc1d640e6e480c81" ]
[ "LogisticRegression/utils.py" ]
[ "# coding=utf-8\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\n__author__ = 'wuyueqiu'\n\n\ndef gen_data():\n # 生成数据\n X1 = [[1 + random.random(), 1 + random.random()] for i in range(50)]\n X2 = [[2 + random.random(), 2 + random.random()] for i in range(50)]\n y1 = [0 for i in range(50)]\n y2 = [1 for i in range(50)]\n X = np.array(X1 + X2)\n y = np.array(y1 + y2)\n return X, y\n\n\nif __name__ == '__main__':\n X, y = gen_data()\n print(X)\n print(y)\n plt.plot(X[:50, 0], X[:50, 1], 'bo')\n plt.plot(X[50:, 0], X[50:, 1], 'rx')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.show" ] ]
rky0930/yolo_v2
[ "efe152edac12ee2d6b5347c73c2fbd9dea2b578c" ]
[ "research/object_detection/models/yolo_v2_darknet_19_feature_extractor_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.models.yolo_v2_darknet_19_feature_extractor.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.models import ssd_feature_extractor_test\nfrom object_detection.models import yolo_v2_darknet_19_feature_extractor\n\n\nclass YOLOv2Darknet19FeatureExtractorTest(\n ssd_feature_extractor_test.SsdFeatureExtractorTestBase, tf.test.TestCase):\n\n def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,\n is_training=True, batch_norm_trainable=True):\n \"\"\"Constructs a YoloDarknet19FeatureExtractor.\n\n Args:\n depth_multiplier: float depth multiplier for feature extractor\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n is_training: whether the network is in training mode.\n batch_norm_trainable: Whether to update batch norm parameters during\n training or not\n Returns:\n an yolo_v2_darknet_19_feature_extractor.YoloDarknet19FeatureExtractor.\n \"\"\"\n min_depth = 32\n conv_hyperparams = {}\n return yolo_v2_darknet_19_feature_extractor.YOLOv2Darknet19FeatureExtractor(\n is_training, depth_multiplier, min_depth, pad_to_multiple,\n conv_hyperparams, batch_norm_trainable)\n\n def test_extract_features_returns_correct_shapes_416(self):\n image_height = 416\n image_width = 416\n depth_multiplier = 1.0\n pad_to_multiple = 1\n expected_feature_map_shape = [(4, 13, 13, 1024)]\n self.check_extract_features_returns_correct_shape(\n image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape)\n\n def test_extract_features_returns_correct_shapes_299(self):\n image_height = 299\n image_width = 299\n depth_multiplier = 1.0\n pad_to_multiple = 1\n expected_feature_map_shape = [(4, 9, 9, 1024)]\n self.check_extract_features_returns_correct_shape(\n image_height, image_width, depth_multiplier, pad_to_multiple,\n expected_feature_map_shape)\n\n def test_extract_features_raises_error_with_invalid_image_size(self):\n image_height = 32\n image_width = 32\n depth_multiplier = 1.0\n pad_to_multiple = 1\n self.check_extract_features_raises_error_with_invalid_image_size(\n image_height, image_width, depth_multiplier, pad_to_multiple)\n\n def test_preprocess_returns_correct_value_range(self):\n image_height = 128\n image_width = 128\n depth_multiplier = 1\n pad_to_multiple = 1\n test_image = np.random.rand(4, image_height, image_width, 3)\n feature_extractor = self._create_feature_extractor(depth_multiplier,\n pad_to_multiple)\n preprocessed_image = feature_extractor.preprocess(test_image)\n self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))\n\n def test_variables_only_created_in_scope(self):\n depth_multiplier = 1\n pad_to_multiple = 1\n scope_name = 'darknet_19'\n self.check_feature_extractor_variables_under_scope(\n depth_multiplier, pad_to_multiple, 
scope_name)\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.rand", "numpy.abs", "tensorflow.test.main" ] ]
tongni1975/TensorFlow-Machine-Learning-Cookbook-Second-Edition
[ "4f57ea4ad79c8111fb29bad3da5d151858c6a050" ]
[ "Chapter01/01_Introduction/07_Working_with_Data_Sources/07_data_gathering.py" ]
[ "# Data gathering\n#----------------------------------\n#\n# This function gives us the ways to access\n# the various data sets we will need\n\n# Data Gathering\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\n\n# Iris Data\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nprint(len(iris.data))\nprint(len(iris.target))\nprint(iris.data[0])\nprint(set(iris.target))\n\n# Low Birthrate Data\nimport requests\n\nbirthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'\nbirth_file = requests.get(birthdata_url)\nbirth_data = birth_file.text.split('\\r\\n')\nbirth_header = birth_data[0].split('\\t')\nbirth_data = [[float(x) for x in y.split('\\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]\nprint(len(birth_data))\nprint(len(birth_data[0]))\n\n\n# Housing Price Data\nimport requests\n\nhousing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'\nhousing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\nhousing_file = requests.get(housing_url)\nhousing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\\n') if len(y)>=1]\nprint(len(housing_data))\nprint(len(housing_data[0]))\n\n\n# MNIST Handwriting Data\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nprint(len(mnist.train.images))\nprint(len(mnist.test.images))\nprint(len(mnist.validation.images))\nprint(mnist.train.labels[1,:])\n\n# CIFAR-10 Image Category Dataset\n# The CIFAR-10 data ( https://www.cs.toronto.edu/~kriz/cifar.html ) contains 60,000 32x32 color images of 10 classes.\n# It was collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton.\n# Alex Krizhevsky maintains the page referenced here.\n# This is such a common dataset, that there are built in functions in TensorFlow to access this data.\n\n# Running this command requires an internet connection and a few minutes to download all the images.\n(X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.cifar10.load_data()\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(y_train[0,]) # this is a frog\n\n# Plot the 0-th image (a frog)\nfrom PIL import Image\nimg = Image.fromarray(X_train[0,:,:,:])\nplt.imshow(img)\n\n\n# Ham/Spam Text Data\nimport requests\nimport io\nfrom zipfile import ZipFile\n\n# Get/read zip file\nzip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'\nr = requests.get(zip_url)\nz = ZipFile(io.BytesIO(r.content))\nfile = z.read('SMSSpamCollection')\n# Format Data\ntext_data = file.decode()\ntext_data = text_data.encode('ascii',errors='ignore')\ntext_data = text_data.decode().split('\\n')\ntext_data = [x.split('\\t') for x in text_data if len(x)>=1]\n[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]\nprint(len(text_data_train))\nprint(set(text_data_target))\nprint(text_data_train[1])\n\n\n# Movie Review Data\nimport requests\nimport io\nimport tarfile\n\nmovie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz'\nr = requests.get(movie_data_url)\n# Stream data into temp object\nstream_data = io.BytesIO(r.content)\ntmp = io.BytesIO()\nwhile True:\n s = stream_data.read(16384)\n if not s: \n break\n 
tmp.write(s)\nstream_data.close()\ntmp.seek(0)\n# Extract tar file\ntar_file = tarfile.open(fileobj=tmp, mode=\"r:gz\")\npos = tar_file.extractfile('rt-polaritydata/rt-polarity.pos')\nneg = tar_file.extractfile('rt-polaritydata/rt-polarity.neg')\n# Save pos/neg reviews\npos_data = []\nfor line in pos:\n pos_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())\nneg_data = []\nfor line in neg:\n neg_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())\ntar_file.close()\n\nprint(len(pos_data))\nprint(len(neg_data))\nprint(neg_data[0])\n\n\n# The Works of Shakespeare Data\nimport requests\n\nshakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'\n# Get Shakespeare text\nresponse = requests.get(shakespeare_url)\nshakespeare_file = response.content\n# Decode binary into string\nshakespeare_text = shakespeare_file.decode('utf-8')\n# Drop first few descriptive paragraphs.\nshakespeare_text = shakespeare_text[7675:]\nprint(len(shakespeare_text))\n\n\n# English-German Sentence Translation Data\nimport requests\nimport io\nfrom zipfile import ZipFile\nsentence_url = 'http://www.manythings.org/anki/deu-eng.zip'\nr = requests.get(sentence_url)\nz = ZipFile(io.BytesIO(r.content))\nfile = z.read('deu.txt')\n# Format Data\neng_ger_data = file.decode()\neng_ger_data = eng_ger_data.encode('ascii',errors='ignore')\neng_ger_data = eng_ger_data.decode().split('\\n')\neng_ger_data = [x.split('\\t') for x in eng_ger_data if len(x)>=1]\n[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]\nprint(len(english_sentence))\nprint(len(german_sentence))\nprint(eng_ger_data[10])\n" ]
[ [ "matplotlib.pyplot.imshow", "sklearn.datasets.load_iris", "tensorflow.contrib.keras.datasets.cifar10.load_data", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.python.framework.ops.reset_default_graph" ] ]
libingzheren/UnarySim
[ "c02461454618e9ce0c86ce695fad9e95d1ca5e00" ]
[ "sw/kernel/relu.py" ]
[ "import torch\nfrom UnarySim.sw.kernel.shiftreg import ShiftReg\n\nclass UnaryReLU(torch.nn.Module):\n \"\"\"\n unary ReLU activation based on comparing with bipolar 0\n data is always in bipolar representation\n the input bit streams are categorized into rate-coded and temporal-coded\n \"\"\"\n def __init__(self, \n depth=8, \n bitwidth=8, \n encode=\"RC\", \n shiftreg=False, \n btype=torch.float, \n stype=torch.float):\n super(UnaryReLU, self).__init__()\n self.depth = depth\n self.encode = encode\n self.sr = shiftreg\n self.stype = stype\n self.btype = btype\n if shiftreg is True:\n assert depth <= 127, \"When using shift register implementation, buffer depth should be less than 127.\"\n self.shiftreg = ShiftReg(depth, self.stype)\n self.depth_half = torch.nn.Parameter(torch.zeros(1).fill_(depth/2).type(btype), requires_grad=False)\n self.sr_cnt = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)\n self.init = True\n if encode is \"RC\":\n self.buf_max = torch.nn.Parameter(torch.zeros(1).fill_(2**depth - 1).type(btype), requires_grad=False)\n self.buf_half = torch.nn.Parameter(torch.zeros(1).fill_(2**(depth - 1)).type(btype), requires_grad=False)\n self.acc = torch.nn.Parameter(torch.zeros(1).fill_(2**(depth - 1)).type(btype), requires_grad=False)\n elif encode is \"TC\":\n self.threshold = torch.nn.Parameter(torch.zeros(1).fill_(2**(bitwidth - 1)).type(btype), requires_grad=False)\n self.acc = torch.nn.Parameter(torch.zeros(1).type(btype), requires_grad=False)\n self.cycle = torch.nn.Parameter(torch.zeros(1).type(btype), requires_grad=False)\n else:\n raise ValueError(\"UnaryReLU encode other than \\\"RC\\\", \\\"TC\\\" is illegal.\")\n \n def UnaryReLU_forward_rc(self, input):\n # check whether acc is larger than or equal to half.\n half_prob_flag = torch.ge(self.acc, self.buf_half).type(torch.int8)\n # only when input is 0 and flag is 1, output 0; otherwise 1\n output = input.type(torch.int8) | (1 - half_prob_flag)\n # update the accumulator based on output, thus acc update is after output generation\n self.acc.data = self.acc.add(output.mul(2).sub(1).type(self.btype)).clamp(0, self.buf_max.item())\n return output.type(self.stype)\n \n def UnaryReLU_forward_rc_sr(self, input):\n # check whether sr sum is larger than or equal to half.\n if self.init is True:\n output = torch.ones_like(input).type(self.stype)\n self.init = False\n else:\n output = (torch.lt(self.sr_cnt, self.depth_half).type(torch.int8) | input.type(torch.int8)).type(self.stype)\n # update shiftreg based on output, thus shiftreg update is after output generation\n _, self.sr_cnt.data = self.shiftreg(output)\n return output.type(self.stype)\n \n def UnaryReLU_forward_tc(self, input):\n # check reach half total cycle\n self.cycle.add_(1)\n half_cycle_flag = torch.gt(self.cycle, self.threshold).type(self.btype)\n # check whether acc is larger than or equal to threshold, when half cycle is reached\n self.acc.data = self.acc.add(input.type(self.btype))\n half_prob_flag = torch.gt(self.acc, self.threshold).type(self.btype)\n # if 1\n output = (1 - half_cycle_flag) * torch.ge(self.cycle, self.acc).type(self.btype) + half_cycle_flag * half_prob_flag * input.type(self.btype)\n return output.type(self.stype)\n\n def forward(self, input):\n if self.encode is \"RC\":\n if self.sr is False:\n return self.UnaryReLU_forward_rc(input)\n else:\n return self.UnaryReLU_forward_rc_sr(input)\n elif self.encode is \"TC\":\n return self.UnaryReLU_forward_tc(input)\n\n" ]
[ [ "torch.ge", "torch.zeros", "torch.lt", "torch.gt", "torch.ones_like" ] ]
RowitZou/topic_dialog_summ
[ "0de31d97b07be4004e08f9755ee66bea47aa7b10", "0de31d97b07be4004e08f9755ee66bea47aa7b10" ]
[ "src/train_abstractive.py", "src/models/decoder_rnn.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport glob\nimport os\nimport random\nimport signal\nimport time\nimport torch\nimport distributed\n\nfrom pytorch_transformers import BertTokenizer\nfrom models import data_loader\nfrom models.data_loader import load_dataset\nfrom models.optimizers import build_optim, build_optim_bert, build_optim_other, build_optim_topic\nfrom models.rl_model import Model as Summarizer\nfrom models.rl_predictor import build_predictor\nfrom models.rl_model_trainer import build_trainer\nfrom others.logging import logger, init_logger\nfrom others.utils import rouge_results_to_str, test_bleu, test_length\nfrom rouge import Rouge, FilesRouge\n\nmodel_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',\n 'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef train_multi(args):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n procs = []\n for i in range(nb_gpu):\n device_id = i\n procs.append(mp.Process(target=run, args=(args,\n device_id, error_queue,), daemon=True))\n procs[i].start()\n logger.info(\" Starting process pid: %d \" % procs[i].pid)\n error_handler.add_child(procs[i].pid)\n for p in procs:\n p.join()\n\n\ndef run(args, device_id, error_queue):\n \"\"\" run process \"\"\"\n\n setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != args.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n\n train_single(args, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be 
ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef baseline(args, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, args.test_batch_ex_size, 'cpu',\n shuffle=False, is_test=True)\n\n if cal_lead:\n mode = \"lead\"\n else:\n mode = \"oracle\"\n\n rouge = Rouge()\n pred_path = '%s.%s.pred' % (args.result_path, mode)\n gold_path = '%s.%s.gold' % (args.result_path, mode)\n save_pred = open(pred_path, 'w', encoding='utf-8')\n save_gold = open(gold_path, 'w', encoding='utf-8')\n\n with torch.no_grad():\n count = 0\n for batch in test_iter:\n summaries = batch.tgt_txt\n origin_sents = batch.original_str\n ex_segs = batch.ex_segs\n ex_segs = [sum(ex_segs[:i]) for i in range(len(ex_segs)+1)]\n\n for idx in range(len(summaries)):\n summary = \" \".join(summaries[idx][1:-1]).replace(\"\\n\", \"\")\n txt = origin_sents[ex_segs[idx]:ex_segs[idx+1]]\n if cal_oracle:\n selected = []\n max_rouge = 0.\n while True:\n cur_max_rouge = max_rouge\n cur_id = -1\n for i in range(len(txt)):\n if (i in selected):\n continue\n c = selected + [i]\n temp_txt = \" \".join([txt[j][9:] for j in c])\n if len(temp_txt.split()) > args.ex_max_token_num:\n continue\n rouge_score = rouge.get_scores(temp_txt, summary)\n rouge_1 = rouge_score[0][\"rouge-1\"][\"f\"]\n rouge_l = rouge_score[0][\"rouge-l\"][\"f\"]\n rouge_score = rouge_1 + rouge_l\n if rouge_score > cur_max_rouge:\n cur_max_rouge = rouge_score\n cur_id = i\n if (cur_id == -1):\n break\n selected.append(cur_id)\n max_rouge = cur_max_rouge\n pred_txt = \" \".join([txt[j][9:] for j in selected])\n else:\n k = min(args.ex_max_k, len(txt))\n pred_txt = \" \".join(txt[:k]).replace(\"\\n\", \"\")\n save_gold.write(summary + \"\\n\")\n save_pred.write(pred_txt + \"\\n\")\n count += 1\n if count % 10 == 0:\n print(count)\n save_gold.flush()\n save_pred.flush()\n save_gold.close()\n save_pred.close()\n\n length = test_length(pred_path, gold_path)\n bleu = test_bleu(pred_path, gold_path)\n file_rouge = FilesRouge(hyp_path=pred_path, ref_path=gold_path)\n pred_rouges = file_rouge.get_scores(avg=True)\n logger.info('Length ratio:\\n%s' % str(length))\n logger.info('Bleu:\\n%s' % str(bleu*100))\n logger.info('Rouges:\\n%s' % rouge_results_to_str(pred_rouges))\n\n\ndef validate(args, device_id):\n timestep = 0\n if (args.test_all):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n best_dev_steps = -1\n best_dev_results = (0, 0)\n best_test_results = (0, 0)\n patient = 100\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n if (args.test_start_from != -1 and step < args.test_start_from):\n xent_lst.append((1e6, cp))\n continue\n logger.info(\"Step %d: processing %s\" % (i, cp))\n rouge_dev = validate_(args, device_id, cp, step)\n rouge_test = test(args, device_id, cp, step)\n if (rouge_dev[0] + rouge_dev[1]) > (best_dev_results[0] + best_dev_results[1]):\n best_dev_results = rouge_dev\n best_test_results = rouge_test\n best_dev_steps = step\n patient = 100\n else:\n patient -= 1\n logger.info(\"Current step: %d\" % step)\n logger.info(\"Dev results: ROUGE-1-l: %f, %f\" % (rouge_dev[0], rouge_dev[1]))\n logger.info(\"Test results: ROUGE-1-l: %f, %f\" % (rouge_test[0], rouge_test[1]))\n logger.info(\"Best step: %d\" % best_dev_steps)\n logger.info(\"Best dev results: ROUGE-1-l: %f, %f\" % (best_dev_results[0], best_dev_results[1]))\n 
logger.info(\"Best test results: ROUGE-1-l: %f, %f\\n\\n\" % (best_test_results[0], best_test_results[1]))\n\n if patient == 0:\n break\n\n else:\n best_dev_results = (0, 0)\n best_test_results = (0, 0)\n best_dev_steps = -1\n while (True):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (not os.path.getsize(cp) > 0):\n time.sleep(60)\n continue\n if (time_of_cp > timestep):\n timestep = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n rouge_dev = validate_(args, device_id, cp, step)\n rouge_test = test(args, device_id, cp, step)\n if (rouge_dev[0] + rouge_dev[1]) > (best_dev_results[0] + best_dev_results[1]):\n best_dev_results = rouge_dev\n best_test_results = rouge_test\n best_dev_steps = step\n\n logger.info(\"Current step: %d\" % step)\n logger.info(\"Dev results: ROUGE-1-l: %f, %f\" % (rouge_dev[0], rouge_dev[1]))\n logger.info(\"Test results: ROUGE-1-l: %f, %f\" % (rouge_test[0], rouge_test[1]))\n logger.info(\"Best step: %d\" % best_dev_steps)\n logger.info(\"Best dev results: ROUGE-1-l: %f, %f\" % (best_dev_results[0], best_dev_results[1]))\n logger.info(\"Best test results: ROUGE-1-l: %f, %f\\n\\n\" % (best_test_results[0], best_test_results[1]))\n\n cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (time_of_cp > timestep):\n continue\n else:\n time.sleep(300)\n\n\ndef validate_(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_dir)\n\n model = Summarizer(args, device, tokenizer.vocab, checkpoint)\n model.eval()\n\n valid_iter = data_loader.Dataloader(args, load_dataset(args, 'dev', shuffle=False),\n args.test_batch_size, args.test_batch_ex_size, device,\n shuffle=False, is_test=True)\n\n predictor = build_predictor(args, tokenizer, model, logger)\n rouge = predictor.validate(valid_iter, step)\n return rouge\n\n\ndef test(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_dir)\n\n model = Summarizer(args, device, tokenizer.vocab, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, args.test_batch_ex_size, device,\n shuffle=False, is_test=True)\n\n predictor = build_predictor(args, tokenizer, model, logger)\n rouge = predictor.validate(test_iter, step)\n return rouge\n\n\ndef test_text(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n 
logger.info('Loading checkpoint from %s' % test_from)\n\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_dir)\n\n model = Summarizer(args, device, tokenizer.vocab, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, args.test_batch_ex_size, device,\n shuffle=False, is_test=True)\n predictor = build_predictor(args, tokenizer, model, logger)\n predictor.translate(test_iter, step)\n\n\ndef train(args, device_id):\n if (args.world_size > 1):\n train_multi(args)\n else:\n train_single(args, device_id)\n\n\ndef train_single(args, device_id):\n init_logger(args.log_file)\n logger.info(str(args))\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % device_id)\n logger.info('Device %s' % device)\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n torch.cuda.manual_seed(args.seed)\n\n if args.train_from != '':\n logger.info('Loading checkpoint from %s' % args.train_from)\n checkpoint = torch.load(args.train_from,\n map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n else:\n checkpoint = None\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n def train_iter_fct():\n return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True),\n args.batch_size, args.batch_ex_size, device,\n shuffle=True, is_test=False)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_dir)\n\n model = Summarizer(args, device, tokenizer.vocab, checkpoint)\n\n if args.train_from_ignore_optim:\n checkpoint = None\n if args.sep_optim:\n if args.encoder == 'bert':\n optim_bert = build_optim_bert(args, model, checkpoint)\n optim_other = build_optim_other(args, model, checkpoint)\n if args.topic_model:\n optim_topic = build_optim_topic(args, model, checkpoint)\n optim = [optim_bert, optim_other, optim_topic]\n else:\n optim = [optim_bert, optim_other]\n else:\n optim_other = build_optim_other(args, model, checkpoint)\n if args.topic_model:\n optim_topic = build_optim_topic(args, model, checkpoint)\n optim = [optim_other, optim_topic]\n else:\n optim = [optim_other]\n else:\n optim = [build_optim(args, model, checkpoint, args.warmup)]\n\n logger.info(model)\n\n trainer = build_trainer(args, device_id, model, optim, tokenizer)\n\n if args.pretrain:\n trainer.train(train_iter_fct, args.pretrain_steps)\n else:\n trainer.train(train_iter_fct, args.train_steps)\n", "\"\"\" Base Class and function for Decoders \"\"\"\n\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\n\nfrom models.neural import aeq, DecoderState, GlobalAttention\n\n\nclass RNNDecoder(nn.Module):\n \"\"\"\n Base recurrent attention-based decoder class.\n Specifies the interface used by different decoder types\n and required by :obj:`models.NMTModel`.\n .. 
mermaid::\n graph BT\n A[Input]\n subgraph RNN\n C[Pos 1]\n D[Pos 2]\n E[Pos N]\n end\n G[Decoder State]\n H[Decoder State]\n I[Outputs]\n F[Memory_Bank]\n A--emb-->C\n A--emb-->D\n A--emb-->E\n H-->C\n C-- attn --- F\n D-- attn --- F\n E-- attn --- F\n C-->I\n D-->I\n E-->I\n E-->G\n F---I\n Args:\n rnn_type (:obj:`str`):\n style of recurrent unit to use, one of [LSTM, GRU]\n bidirectional_encoder (bool) : use with a bidirectional encoder\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n attn_type (str) : see :obj:`onmt.modules.GlobalAttention`\n coverage_attn (str): see :obj:`onmt.modules.GlobalAttention`\n copy_attn (bool): setup a separate copy attention mechanism\n dropout (float) : dropout value for :obj:`nn.Dropout`\n embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, copy_attn=False,\n dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(RNNDecoder, self).__init__()\n assert embeddings is not None\n # Basic attributes.\n self.decoder_type = 'rnn'\n self.bidirectional_encoder = bidirectional_encoder\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.embeddings = embeddings\n self.dropout = nn.Dropout(dropout)\n input_size = self.embeddings.embedding_dim + self.hidden_size\n\n # Build the RNN.\n self.rnn = self._build_rnn(rnn_type,\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout)\n\n # Set up the standard attention.\n self._coverage = coverage_attn\n self.attn = GlobalAttention(\n hidden_size, coverage=coverage_attn,\n attn_type=attn_type\n )\n\n # Set up a separated copy attention layer, if needed.\n self._copy = False\n if copy_attn and not reuse_copy_attn:\n self.copy_attn = GlobalAttention(\n hidden_size, attn_type=attn_type\n )\n if copy_attn:\n self._copy = True\n self._reuse_copy_attn = reuse_copy_attn\n\n def _build_rnn(self, rnn_type, input_size,\n hidden_size, num_layers, dropout):\n if rnn_type == \"LSTM\":\n stacked_cell = StackedLSTM\n else:\n stacked_cell = StackedGRU\n return stacked_cell(num_layers, input_size,\n hidden_size, dropout)\n\n def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n \"\"\"\n # Additional args check.\n input_feed = state.input_feed.squeeze(0)\n input_feed_batch, _ = input_feed.size()\n tgt_batch, _, _ = tgt.size()\n aeq(tgt_batch, input_feed_batch)\n # END Additional args check.\n\n # Initialize local and return variables.\n decoder_outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n if self.training and self._coverage:\n attns[\"coverage\"] = []\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n for _, emb_t in enumerate(tgt.transpose(0, 1).split(1)):\n emb_t = emb_t.squeeze(0)\n decoder_input = torch.cat([emb_t, input_feed], 1)\n\n rnn_output, hidden = self.rnn(decoder_input, hidden)\n decoder_output, p_attn = self.attn(\n rnn_output,\n memory_bank,\n memory_lengths=memory_lengths)\n\n decoder_output = self.dropout(decoder_output)\n input_feed = decoder_output\n\n decoder_outputs += [decoder_output]\n attns[\"std\"] += [p_attn]\n\n # Update the coverage attention.\n if self.training 
and self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy attention layer.\n if self._copy and not self._reuse_copy_attn:\n _, copy_attn = self.copy_attn(decoder_output,\n memory_bank)\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"]\n # Return result.\n return hidden, decoder_outputs, attns\n\n def forward(self, tgt, memory_bank, state, memory_masks=None,\n step=None):\n # Check\n assert isinstance(state, RNNDecoderState)\n # tgt.size() returns tgt length and batch\n tgt_batch, _ = tgt.size()\n memory_batch, _, _ = memory_bank.size()\n aeq(tgt_batch, memory_batch)\n # END\n memory_lengths = memory_masks.sum(dim=1)\n emb = self.embeddings(tgt)\n # Run the forward pass of the RNN.\n decoder_final, decoder_outputs, attns = self._run_forward_pass(\n emb, memory_bank, state, memory_lengths=memory_lengths)\n\n # Update the state with the result.\n final_output = decoder_outputs[-1]\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(decoder_final, final_output.unsqueeze(0), coverage)\n\n # Concatenates sequence of tensors along a new dimension.\n # NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list\n # (in particular in case of SRU) it was not raising error in 0.3\n # since stack(Variable) was allowed.\n # In 0.4, SRU returns a tensor that shouldn't be stacke\n if type(decoder_outputs) == list:\n decoder_outputs = torch.stack(decoder_outputs).transpose(0, 1)\n\n for k in attns:\n if type(attns[k]) == list:\n attns[k] = torch.stack(attns[k]).transpose(0, 1)\n\n return decoder_outputs, state, attns\n\n def init_decoder_state(self, src, memory_bank, encoder_final):\n \"\"\" Init decoder state with last state of the encoder \"\"\"\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))\n\n\nclass RNNDecoderState(DecoderState):\n \"\"\" Base class for RNN decoder state \"\"\"\n\n def __init__(self, hidden_size, rnnstate):\n \"\"\"\n Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).\n \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.coverage = None\n\n # Init the input feed.\n batch_size = self.hidden[0].size(1)\n h_size = (batch_size, hidden_size)\n self.input_feed = self.hidden[0].data.new(*h_size).zero_() \\\n .unsqueeze(0)\n\n @property\n def _all(self):\n return self.hidden + (self.input_feed,)\n\n def update_state(self, rnnstate, input_feed, coverage):\n \"\"\" Update decoder state \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.input_feed = input_feed\n self.coverage = coverage\n\n def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n vars = [e.data.repeat(1, beam_size, 1)\n for e in self._all]\n self.hidden = tuple(vars[:-1])\n self.input_feed = vars[-1]\n\n def map_batch_fn(self, fn):\n self.hidden = tuple(map(lambda x: fn(x, 1), self.hidden))\n self.input_feed = fn(self.input_feed, 1)\n\n\nclass StackedLSTM(nn.Module):\n \"\"\"\n Our own implementation of stacked LSTM.\n Needed for the decoder, because we do input feeding.\n \"\"\"\n\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedLSTM, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.layers.append(nn.LSTMCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input_feed, hidden):\n h_0, c_0 = hidden\n h_1, c_1 = [], []\n for i, layer in enumerate(self.layers):\n h_1_i, c_1_i = layer(input_feed, (h_0[i], c_0[i]))\n input_feed = h_1_i\n if i + 1 != self.num_layers:\n input_feed = self.dropout(input_feed)\n h_1 += [h_1_i]\n c_1 += [c_1_i]\n\n h_1 = torch.stack(h_1)\n c_1 = torch.stack(c_1)\n\n return input_feed, (h_1, c_1)\n\n\nclass StackedGRU(nn.Module):\n \"\"\"\n Our own implementation of stacked GRU.\n Needed for the decoder, because we do input feeding.\n \"\"\"\n\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedGRU, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.layers.append(nn.GRUCell(input_size, rnn_size))\n input_size = rnn_size\n\n def forward(self, input_feed, hidden):\n h_1 = []\n for i, layer in enumerate(self.layers):\n h_1_i = layer(input_feed, hidden[0][i])\n input_feed = h_1_i\n if i + 1 != self.num_layers:\n input_feed = self.dropout(input_feed)\n h_1 += [h_1_i]\n\n h_1 = torch.stack(h_1)\n return input_feed, (h_1,)\n" ]
[ [ "torch.cuda.set_device", "torch.load", "torch.cuda.manual_seed", "torch.manual_seed", "torch.multiprocessing.get_context", "torch.no_grad" ], [ "torch.nn.Dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.LSTMCell", "torch.stack", "torch.nn.GRUCell" ] ]
piboauthors/PiBO-Spearmint
[ "5759a9f9ba3c0124af2b4750e37806ce32bc3d7f" ]
[ "spearmint/choosers/.ipynb_checkpoints/acquisition_functions-checkpoint.py" ]
[ "# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\n\nimport os\nimport tempfile\nimport copy\nimport numpy as np\nimport numpy.random as npr\nimport scipy.linalg as spla\nimport scipy.stats as sps\nimport scipy.optimize as spo\nimport cPickle\nimport multiprocessing\nimport ast\n\n\ndef compute_ei(model, pred, ei_target=None, compute_grad=True):\n # TODO: use ei_target\n if pred.ndim == 1:\n pred = pred[None,:]\n\n if not compute_grad:\n func_m, func_v = model.predict(pred)\n else:\n (func_m,\n func_v,\n grad_xp_m,\n grad_xp_v) = model.predict(pred, compute_grad=True)\n\n if func_m.ndim == 1:\n func_m = func_m[:,np.newaxis]\n if func_v.ndim == 1:\n func_v = func_v[:,np.newaxis]\n\n if compute_grad:\n if grad_xp_m.ndim == 2:\n grad_xp_m = grad_xp_m[:,:,np.newaxis]\n if grad_xp_v.ndim == 2:\n grad_xp_v = grad_xp_v[:,:,np.newaxis]\n\n ei_values = model.values.min(axis=0)\n\n ei_values = np.array(ei_values)\n if ei_values.ndim == 0:\n ei_values = np.array([[ei_values]])\n\n # Expected improvement\n func_s = np.sqrt(func_v)\n u = (ei_values - func_m) / func_s\n ncdf = sps.norm.cdf(u)\n npdf = sps.norm.pdf(u)\n ei = np.mean(func_s*( u*ncdf + npdf),axis=1)\n\n if not compute_grad:\n return ei\n\n ei = np.sum(ei)\n\n # Gradients of ei w.r.t. mean and variance \n g_ei_m = -ncdf\n g_ei_s2 = 0.5*npdf / func_s\n \n # Gradient of ei w.r.t. the inputs\n grad_xp = (grad_xp_m*np.tile(g_ei_m,(pred.shape[1],1))).T + (grad_xp_v.T*g_ei_s2).T\n grad_xp = np.mean(grad_xp,axis=0)\n\n return ei, grad_xp.flatten()\n" ]
[ [ "scipy.stats.norm.cdf", "numpy.sqrt", "scipy.stats.norm.pdf", "numpy.tile", "numpy.mean", "numpy.array", "numpy.sum" ] ]
AlbertMP/Use-LSTM-For-Sentiment-Analysis
[ "bab6228fa7bf61fa685e545361eb4601799cbffc" ]
[ "sentiment-analysis/model_lstm.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import LSTM, Dense, Embedding, Dropout\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# 导入数据\n# 文件的数据中,特征为evaluation, 类别为label.\ndef load_data(filepath, input_shape=20):\n df = pd.read_csv(filepath, error_bad_lines=False)\n\n # 标签及词汇表\n labels, vocabulary = list(df['label'].unique()), list(df['evaluation'].unique())\n\n # 构造字符级别的特征\n string = ''\n for word in vocabulary:\n string += word\n\n vocabulary = set(string)\n\n # 字典列表\n word_dictionary = {word: i+1 for i, word in enumerate(vocabulary)}\n with open('word_dict.pk', 'wb') as f:\n pickle.dump(word_dictionary, f)\n inverse_word_dictionary = {i+1: word for i, word in enumerate(vocabulary)}\n label_dictionary = {label: i for i, label in enumerate(labels)}\n with open('label_dict.pk', 'wb') as f:\n pickle.dump(label_dictionary, f)\n output_dictionary = {i: labels for i, labels in enumerate(labels)}\n\n vocab_size = len(word_dictionary.keys()) # 词汇表大小\n label_size = len(label_dictionary.keys()) # 标签类别数量\n\n # 序列填充,按input_shape填充,长度不足的按0补充\n x = [[word_dictionary[word] for word in sent] for sent in df['evaluation']]\n x = pad_sequences(maxlen=input_shape, sequences=x, padding='post', value=0)\n y = [[label_dictionary[sent]] for sent in df['label']]\n y = [np_utils.to_categorical(label, num_classes=label_size) for label in y]\n y = np.array([list(_[0]) for _ in y])\n\n return x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary\n\n# 创建深度学习模型, Embedding + LSTM + Softmax.\ndef create_LSTM(n_units, input_shape, output_dim, filepath):\n x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath)\n model = Sequential()\n model.add(Embedding(input_dim=vocab_size + 1, output_dim=output_dim,\n input_length=input_shape, mask_zero=True))\n model.add(LSTM(n_units, input_shape=(x.shape[0], x.shape[1])))\n model.add(Dropout(0.2))\n model.add(Dense(label_size, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n # plot_model(model, to_file='./model_lstm.png', show_shapes=True)\n model.summary()\n\n return model\n\n# 模型训练\ndef model_train(input_shape, filepath, model_save_path):\n\n # 将数据集分为训练集和测试集,占比为9:1\n # input_shape = 100\n x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath, input_shape)\n train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.2, random_state = 42)\n\n # 模型输入参数,需要自己根据需要调整\n n_units = 100\n batch_size = 32\n epochs = 30\n output_dim = 20\n\n # 模型训练\n lstm_model = create_LSTM(n_units, input_shape, output_dim, filepath)\n lstm_model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1)\n\n # 模型保存\n lstm_model.save(model_save_path)\n\n N = test_x.shape[0] # 测试的条数\n predict = []\n label = []\n for start, end in zip(range(0, N, 1), range(1, N+1, 1)):\n sentence = [inverse_word_dictionary[i] for i in test_x[start] if i != 0]\n y_predict = lstm_model.predict(test_x[start:end])\n label_predict = output_dictionary[np.argmax(y_predict[0])]\n label_true = output_dictionary[np.argmax(test_y[start:end])]\n print(''.join(sentence), label_true, label_predict) # 输出预测结果\n predict.append(label_predict)\n label.append(label_true)\n\n acc = accuracy_score(predict, label) # 预测准确率\n 
print('模型在测试集上的准确率为: %s.' % acc)\n\nif __name__ == '__main__':\n filepath = './train.csv'\n input_shape = 180\n model_save_path = './train_model.h5'\n model_train(input_shape, filepath, model_save_path)" ]
[ [ "pandas.read_csv", "numpy.argmax", "sklearn.model_selection.train_test_split", "sklearn.metrics.accuracy_score" ] ]
LaureenK/3D-BoNet_LK
[ "bc2f12777ae6323ca32881bfbb168cc05f29f3ef" ]
[ "tf_ops_python3/interpolation/tf_interpolate_op_test.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tf_interpolate import three_nn, three_interpolate\n\nclass GroupPointTest(tf.test.TestCase):\n def test(self):\n pass\n\n def test_grad(self):\n with self.test_session():\n points = tf.constant(np.random.random((1,8,16)).astype('float32'))\n print (points)\n xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32'))\n xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32'))\n dist, idx = three_nn(xyz1, xyz2)\n weight = tf.ones_like(dist)/3.0\n interpolated_points = three_interpolate(points, idx, weight)\n print (interpolated_points)\n err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16))\n print (err)\n self.assertLess(err, 1e-4) \n\nif __name__=='__main__':\n tf.test.main() \n" ]
[ [ "tensorflow.test.compute_gradient_error", "numpy.random.random", "tensorflow.ones_like", "tensorflow.test.main" ] ]
565353780/filter-socket
[ "4da4c1d6bf5065ffdf8e1d3e79faf97582c77d87" ]
[ "src/Python/ResNet/resnet_train.py" ]
[ "import os\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim.nets as nets\r\n\r\nclass ResNetTrainer:\r\n def __init__(self, RootImageFolderPath, ModelSavePath, ModelSaveName, batchSize, learningRate, ImageWidth, ImageHeight, numThreads, labelNum):\r\n super().__init__()\r\n\r\n self.RootImageFolderPath = RootImageFolderPath\r\n self.ModelSavePath = ModelSavePath\r\n self.ModelSaveName = ModelSaveName\r\n self.batchSize = batchSize\r\n self.learningRate = learningRate\r\n self.ImageWidth = ImageWidth\r\n self.ImageHeight = ImageHeight\r\n self.numThreads = numThreads\r\n self.labelNum = labelNum\r\n\r\n if self.RootImageFolderPath[-1] != \"/\":\r\n self.RootImageFolderPath += \"/\"\r\n \r\n if self.ModelSavePath[-1] != \"/\":\r\n self.ModelSavePath += \"/\"\r\n\r\n def read_and_decode_tfrecord(self, filename):\r\n filename_deque = tf.train.string_input_producer(filename)\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_deque)\r\n features = tf.parse_single_example(serialized_example, features={\r\n 'label': tf.FixedLenFeature([], tf.int64),\r\n 'img_raw': tf.FixedLenFeature([], tf.string)})\r\n label = tf.cast(features['label'], tf.int64)\r\n img = tf.decode_raw(features['img_raw'], tf.uint8)\r\n img = tf.reshape(img, [self.ImageWidth, self.ImageHeight, 3])\r\n img = tf.cast(img, tf.float32) / 255.0 #将矩阵归一化0-1之间\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!label :\", label)\r\n print(img.shape)\r\n print(label.shape)\r\n return img, label\r\n\r\n def train(self):\r\n save_dir = self.ModelSavePath\r\n\r\n if not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\n \r\n save_dir += self.ModelSaveName\r\n\r\n batch_size_ = self.batchSize\r\n\r\n lr = tf.Variable(self.learningRate, dtype=tf.float32)\r\n x = tf.placeholder(tf.float32, [None, self.ImageWidth, self.ImageHeight, 3])\r\n y_ = tf.placeholder(tf.float32, [None])\r\n\r\n tfRecordPath = self.RootImageFolderPath.replace(\"sources\", \"tfRecord\")\r\n\r\n tfRecordName_list = os.listdir(tfRecordPath)\r\n\r\n train_list = []\r\n\r\n for tfRecordName in tfRecordName_list:\r\n train_list.append(tfRecordPath + tfRecordName)\r\n # 随机打乱顺序\r\n img, label = self.read_and_decode_tfrecord(train_list)\r\n img_batch, label_batch = tf.train.shuffle_batch([img, label], num_threads=self.numThreads, batch_size=batch_size_, capacity=10000,\r\n min_after_dequeue=9900)\r\n\r\n # 将label值进行onehot编码\r\n one_hot_labels = tf.one_hot(indices=tf.cast(y_, tf.int32), depth=self.labelNum)\r\n pred, end_points = nets.resnet_v2.resnet_v2_50(x, num_classes=self.labelNum, is_training=True)\r\n pred = tf.reshape(pred, shape=[-1, self.labelNum])\r\n\r\n # 定义损失函数和优化器\r\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=one_hot_labels))\r\n optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)\r\n\r\n # 准确度\r\n a = tf.argmax(pred, 1)\r\n b = tf.argmax(one_hot_labels, 1)\r\n correct_pred = tf.equal(a, b)\r\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\n saver = tf.train.Saver()\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n # 创建一个协调器,管理线程\r\n coord = tf.train.Coordinator()\r\n # 启动QueueRunner,此时文件名队列已经进队\r\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n i = 0\r\n while True:\r\n i += 1\r\n b_image, b_label = sess.run([img_batch, label_batch])\r\n _, loss_, y_t, y_p, a_, b_ = sess.run([optimizer, loss, one_hot_labels, pred, a, b], 
feed_dict={x: b_image,\r\n y_: b_label})\r\n print('step: {}, train_loss: {}'.format(i, loss_))\r\n if i % 10 == 0:\r\n _loss, acc_train = sess.run([loss, accuracy], feed_dict={x: b_image, y_: b_label})\r\n print('--------------------------------------------------------')\r\n print('step: {} train_acc: {} loss: {}'.format(i, acc_train, _loss))\r\n print('--------------------------------------------------------')\r\n if i % 100 == 0:\r\n saver.save(sess, save_dir, global_step=i)\r\n coord.request_stop()\r\n # 其他所有线程关闭之后,这一函数才能返回\r\n coord.join(threads)\r\n \r\nif __name__ == \"__main__\":\r\n resnet_trainer = ResNetTrainer(RootImageFolderPath=os.getcwd() + \"/sources/\", \\\r\n ModelSavePath=os.getcwd() + \"/models/\", \\\r\n ModelSaveName=\"train_sandstone.model\", \\\r\n batchSize=128, \\\r\n learningRate=0.0001, \\\r\n ImageWidth=224, \\\r\n ImageHeight=224, \\\r\n numThreads=8, \\\r\n labelNum=7)\r\n\r\n resnet_trainer.train()" ]
[ [ "tensorflow.FixedLenFeature", "tensorflow.cast", "tensorflow.equal", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.contrib.slim.nets.resnet_v2.resnet_v2_50", "tensorflow.train.AdamOptimizer", "tensorflow.TFRecordReader", "tensorflow.Variable", "tensorflow.decode_raw", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.train.shuffle_batch", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.string_input_producer", "tensorflow.train.start_queue_runners", "tensorflow.reshape" ] ]
jie-mei/NLI
[ "eb9cac23e24c1a960a46fadb2aee4ad4db3ab327" ]
[ "src/nn/cafe.py" ]
[ "\"\"\" Prediction models.\n\"\"\"\n\nimport typing as t\n\nimport numpy as np\nimport tensorflow as tf\n\nimport embed\nimport data\nimport op\nimport nn\nfrom nn.base import Model, SoftmaxCrossEntropyMixin\nfrom util.log import exec_log as log\nfrom util.debug import *\n\n\nclass CAFE(SoftmaxCrossEntropyMixin, Model):\n \n def __init__(self,\n embeddings: embed.IndexedWordEmbedding,\n class_num: int,\n scale_l1: float = 0.0,\n scale_l2: float = 0.000001,\n encode_dim: int = 300,\n fact_intr_dim: int = 10,\n fact_proj_dim: int = -1,\n char_filer_width: int = 5,\n char_embed_dim: int = 8,\n char_conv_dim: int = 100,\n lstm_unit: int = 300,\n ) -> None:\n super(CAFE, self).__init__()\n self._class_num = class_num\n self.scale_l1 = scale_l1\n self.scale_l2 = scale_l2\n self.encode_dim = encode_dim\n self.fact_proj_dim = fact_proj_dim\n self.fact_intr_dim = fact_intr_dim\n self.char_filter_width = char_filer_width\n self.char_embed_dim = char_embed_dim\n self.char_conv_dim = char_conv_dim\n self.lstm_unit = lstm_unit\n\n self.keep_prob = tf.placeholder(tf.float32, shape=[])\n\n op_kwargs = {'scale_l1': self.scale_l1,\n 'scale_l2': self.scale_l2,\n 'keep_prob': self.keep_prob}\n\n with tf.variable_scope('embed') as s:\n # Word pretrained embeddings (300D)\n word_embed = tf.constant(embeddings.get_embeddings(),\n dtype=tf.float32,\n name='word_embed')\n word_embed1, word_embed2 = map(lambda x: tf.gather(word_embed, x),\n [self.x1, self.x2])\n\n # Character convolutional embeddings (`char_conv_dim`D)\n char_embed = op.get_variable('char_embed',\n shape=(256, char_embed_dim))\n char_filter = op.get_variable('char_filter',\n shape=(1, self.char_filter_width, self.char_embed_dim,\n self.char_conv_dim))\n def embed_chars(x_char):\n embed = tf.gather(char_embed, x_char)\n # shape: [batch, seq_len, word_len, embed_dim]\n conv = tf.nn.conv2d(embed, char_filter, [1, 1, 1, 1], 'VALID')\n # shape: [batch, seq_len, word_len - filter_width + 1, conv_dim]\n return tf.reduce_max(conv, 2)\n # shape: [batch, seq_len, conv_dim]\n char_embed1, char_embed2 = map(embed_chars, [self.char1, self.char2])\n\n # Tag one-hot embeddings (72D)\n def embed_tags(x_ids, x_tags, x_len):\n x_tags *= tf.sequence_mask(x_len, tf.shape(x_tags)[1],\n dtype=tf.int32)\n # shape: [batch, seq_len]\n tag_embed = tf.one_hot(x_tags, data.SNLI.TAGS,\n dtype=tf.float32,\n name='char_embed')\n return tag_embed[:,:tf.shape(x_ids)[1]]\n tag_embed1, tag_embed2 = map(embed_tags,\n *zip((self.x1, self.tag1, self.len1),\n (self.x2, self.tag2, self.len2)))\n\n # Merge embeddings\n x1 = tf.concat([word_embed1, char_embed1, tag_embed1], 2)\n x2 = tf.concat([word_embed2, char_embed2, tag_embed2], 2)\n\n with tf.variable_scope('encode') as s:\n def encode(x):\n x = op.highway(x, scope='hw-1', dim=self.encode_dim, **op_kwargs)\n x = op.highway(x, scope='hw-2', dim=self.encode_dim, **op_kwargs)\n return x\n x1, x2 = map(encode, [x1, x2])\n # shape: [batch, seq_len, encode_dim]\n\n with tf.variable_scope('attent') as s:\n # Alignment\n def co_attent(t1, t2):\n t1 = op.linear(t1, **op_kwargs)\n t2 = op.linear(t2, **op_kwargs)\n return tf.matmul(t1, tf.matrix_transpose(t2))\n # shape: [batch, seq_len1, seq_len2]\n with tf.variable_scope('inter-align') as s:\n att = co_attent(x1, x2)\n inter1 = tf.matmul(tf.nn.softmax(att), x2)\n inter2 = tf.matmul(tf.nn.softmax(tf.matrix_transpose(att)), x1)\n with tf.variable_scope('intra-align') as s:\n def self_attent(x):\n att = co_attent(x, x)\n return x * tf.reduce_sum(att, 2, keep_dims=True)\n intra1, intra2 = 
map(self_attent, [x1, x2])\n def align_fact(x, x_align, scope):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n f1 = self.fact('fact-concat', tf.concat([x, x_align], 2))\n f2 = self.fact('fact-sub', x - x_align)\n f3 = self.fact('fact-mul', x * x_align)\n return tf.stack([f1, f2, f3], 2)\n # shape: [batch, seq_len, 3]\n\n # TODO: variables may not be shared between different facts\n #x1 = tf.concat([x1, inter1, intra1], 2)\n #x2 = tf.concat([x2, inter2, intra2], 2)\n x1 = tf.concat([x1,\n align_fact(x1, inter1, 'inter'),\n align_fact(x1, intra1, 'intra')], 2)\n x2 = tf.concat([x2,\n align_fact(x2, inter2, 'inter'),\n align_fact(x2, intra2, 'intra')], 2)\n\n with tf.variable_scope('sequence', reuse=tf.AUTO_REUSE) as s:\n def lstm_encode(x):\n # shape: [batch, seq_len, encode_dim + 6]\n outputs, states = tf.nn.dynamic_rnn(\n cell=tf.nn.rnn_cell.LSTMCell(self.lstm_unit),\n inputs=x,\n dtype=tf.float32)\n outputs = tf.nn.dropout(outputs, self.keep_prob)\n return outputs\n x1, x2 = map(lstm_encode, [x1, x2])\n\n with tf.variable_scope('pooling') as s:\n def pool(x):\n return tf.concat([\n tf.reduce_max(x, axis=1),\n tf.reduce_sum(x, axis=1),\n ], 1)\n # shape: [batch, dim]\n x1, x2 = map(pool, [x1, x2])\n\n with tf.variable_scope('decode') as s:\n x = tf.concat([x1, x2, x1 - x2, x1 * x2], 1)\n x = op.highway(x, scope='hw-1', dim=self.encode_dim, **op_kwargs)\n x = op.highway(x, scope='hw-2', dim=self.encode_dim, **op_kwargs)\n y_hat = op.linear(x, dim=self._class_num, activation_fn=None)\n\n self.evaluate_and_loss(y_hat)\n\n def fact(self, scope, x):\n \"\"\" Factorize input vector into a feature scalar.\n\n Input:\n 3D-Tensor: [batch, seq_len, input_dim]\n Output:\n 2D-Tensor: [batch, seq_len]\n \"\"\"\n return self.fact_impl1(scope, x)\n\n # Factoriztion\n def fact_impl1(self, scope, x):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n # NOTE: project to low-dimensonal space\n if self.fact_proj_dim > 0:\n x = op.linear(x, dim=self.fact_proj_dim, activation_fn=None)\n input_dim = x.get_shape()[2]\n fact_wght = op.get_variable('fact_weight',\n shape=(input_dim))\n fact_bias = op.get_variable('fact_bias', shape=(1))\n fact_intr = op.get_variable('fact_inter',\n shape=(input_dim, self.fact_intr_dim))\n l = (tf.reduce_sum(x * tf.reshape(fact_wght, [1, 1, -1]), -1)\n + fact_bias)\n # shape: [batch, seq_len]\n intr_mat = tf.matmul(fact_intr, tf.matrix_transpose(fact_intr))\n # shape: [input, input_dim]\n mask = tf.sequence_mask(tf.range(input_dim),\n maxlen=input_dim,\n dtype=tf.float32)\n # shape: [encode_dim, input_dim]\n p = tf.reduce_sum(\n tf.matmul(tf.expand_dims(x, 2), tf.expand_dims(x, 3)) *\n # shape: [batch, seq_len, input_dim, input_dim]\n tf.expand_dims(tf.expand_dims(intr_mat, 0), 0),\n #tf.expand_dims(tf.expand_dims(intr_mat * mask, 0), 0),\n # shape: [1, 1, input_dim, input_dim]\n [2, 3])\n # shape: [batch, seq_len]\n return l + p\n\n # Factoriztion\n def fact_impl2(self, scope, x):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n if self.fact_proj_dim > 0:\n x = op.linear(x, dim=self.fact_proj_dim, activation_fn=None)\n input_dim = x.get_shape()[2]\n fact_wght = op.get_variable('fact_weight',\n shape=(input_dim))\n fact_bias = op.get_variable('fact_bias', shape=(1))\n fact_intr = op.get_variable('fact_inter',\n shape=(input_dim, self.fact_intr_dim))\n l = (tf.reduce_sum(x * tf.reshape(fact_wght, [1, 1, -1]), -1)\n + fact_bias)\n # shape: [batch, seq_len]\n\n intr_mat = tf.matmul(fact_intr, tf.matrix_transpose(fact_intr))\n # shape: [input, input_dim]\n mask = 
tf.sequence_mask(tf.range(input_dim),\n maxlen=input_dim,\n dtype=tf.float32)\n # shape: [encode_dim, input_dim]\n\n i = tf.constant(0)\n x_shape = tf.shape(x)\n batch_size, seq_len = x_shape[0], x_shape[1]\n p = tf.reshape(tf.zeros([batch_size]), [batch_size, -1])\n def loop_cond(i, x, p, seq_len):\n return tf.less(i, seq_len)\n def loop_body(i, x, p, seq_len):\n x_vect = x[:,i]\n # shape: [batch, input_dim]\n #x_mat = tf.matmul(tf.expand_dims(x_vect, 1),\n # tf.expand_dims(x_vect, 2))\n # NOTE: Avoid Internal Error: Blas xGEMMBatched launch failed\n x_mat = (tf.tile(tf.expand_dims(x_vect, 1), [1, input_dim, 1]) *\n tf.tile(tf.expand_dims(x_vect, 2), [1, 1, input_dim]))\n # shape: [batch, input_dim, input_dim]\n p_i = tf.reduce_sum(intr_mat * x_mat, [1, 2])\n p_i = tf.expand_dims(p_i, 1)\n # shape: [batch, 1]\n p = tf.concat([p, p_i], 1)\n return [i, x, p, seq_len]\n _, _, p_loop, _ = tf.while_loop(loop_cond, loop_body,\n [i, x, p, seq_len],\n parallel_iterations=1)\n return l + p_loop[:,1:]\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.nn.conv2d", "tensorflow.while_loop", "tensorflow.gather", "tensorflow.matrix_transpose", "tensorflow.nn.dropout", "tensorflow.shape", "tensorflow.less", "tensorflow.placeholder", "tensorflow.one_hot", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.range", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.variable_scope" ] ]
sunjerry019/adventOfCode18
[ "6970ece25e54b9f90c4aadd1a5d8569bd32f403b" ]
[ "2018/day_03/3_2.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\n\ninput = open(\"3.in\",\"r\")\ninputContents = input.readlines()\n\nfabric = np.zeros((1000,1000), dtype=int)\nuninterruptedClaims = list(range(1, len(inputContents) + 1))\n# l = 998\n# w = 999\n\nfor claim in inputContents:\n a = claim.split(\" @ \")\n a1 = a[1].split(\": \")\n b1 = a1[0].split(\",\")\n b2 = a1[1].split(\"x\")\n \"\"\"\n The number of inches between the left edge of the fabric and the left edge of the rectangle.\n The number of inches between the top edge of the fabric and the top edge of the rectangle.\n The width of the rectangle in inches.\n The height of the rectangle in inches.\n \"\"\"\n c = {\n \"id\" : int(a[0].split(\"#\")[1]),\n \"pos\" : {\"x\": int(b1[0]), \"y\": int(b1[1])},\n \"area\": {\"w\": int(b2[0]), \"h\": int(b2[1])}\n }\n\n #if c[\"pos\"][\"x\"] + c[\"area\"][\"w\"] > w:\n # w = c[\"pos\"][\"x\"] + c[\"area\"][\"w\"]\n #if c[\"pos\"][\"y\"] + c[\"area\"][\"h\"] > l:\n # l = c[\"pos\"][\"y\"] + c[\"area\"][\"h\"]\n\n for x in range(c[\"pos\"][\"x\"], c[\"pos\"][\"x\"] + c[\"area\"][\"w\"]):\n for y in range(c[\"pos\"][\"y\"], c[\"pos\"][\"y\"] + c[\"area\"][\"h\"]):\n if fabric[x, y] > 0:\n if c[\"id\"] in uninterruptedClaims:\n uninterruptedClaims.remove(c[\"id\"])\n if fabric[x, y] in uninterruptedClaims:\n uninterruptedClaims.remove(fabric[x, y])\n fabric[x, y] = c[\"id\"]\n\nprint(uninterruptedClaims)\n" ]
[ [ "numpy.zeros" ] ]
arnaou/MLOPS-GNN
[ "62a2482187688841ada75b69541e1de06df38c9d" ]
[ "src/models/model.py" ]
[ "import torch\nfrom torch_geometric.nn.models import AttentiveFP\n\n\nclass GNNModel:\n def __init__(self, in_channels=39, hidden_channels=150, out_channels=1, edge_dim=10,\n num_layers=2, num_timesteps=2, dropout=0.0):\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.out_channels = out_channels\n self.edge_dim = edge_dim\n self.num_layers = num_layers\n self.num_timesteps = num_timesteps\n self.dropout = dropout\n self.device = self.get_device()\n\n def get_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def model(self):\n return AttentiveFP(\n in_channels=self.in_channels,\n hidden_channels=self.hidden_channels,\n out_channels=self.out_channels,\n edge_dim=self.edge_dim,\n num_layers=self.num_layers,\n num_timesteps=self.num_timesteps,\n dropout=self.dropout,\n ).to(self.device)\n" ]
[ [ "torch.cuda.is_available" ] ]
agclark12/tumor_migration_analysis
[ "c63d43306f9f381ddec04a8301fcd268a5d71c38" ]
[ "tumor_migration_analysis/piv_plot_vectors.py" ]
[ "#!/opt/local/bin/python\n\n\"\"\"\n\nPlots PIV vector data (just vectors, no fame)\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom skimage.io._plugins import tifffile_plugin as tifffile\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\nimport utility_functions as uf\n\ndef plot_vectors(stk_path,px_size=1,scale_factor=0.004,scale_length=0.1,vector_width=1.5):\n \"\"\"Plots PIV vectors overlaid onto the orginal image stack\n The images are automatically written to a new directory.\n\n Parameters\n ----------\n stk_path : string\n the path to the image stack to be analyzed\n px_size : float\n the pixel size in um/px\n scale_factor : float\n a scaling to determine the vector size\n scale_length : int\n the length of the scale vector (in scaled units, usually um/min)\n vector_width : float\n the width of the PIV vectors for the quiver plots\n\n \"\"\"\n\n # opens the image stack to get the aspect ratio\n stk = tifffile.imread(stk_path)\n width = stk[0].shape[1] * px_size\n height = stk[0].shape[0] * px_size\n ar = height / width\n\n # finds the PIV vector data\n data_dir = os.path.splitext(stk_path)[0] + \"_piv_data\"\n if not os.path.isdir(data_dir):\n raise FileNotFoundError(\"No PIV vector data found. Please run extraction script first.\")\n\n # get unique basename list (from x coordinate data)\n basename_list = [_[:-6] for _ in os.listdir(data_dir) if '_x.dat' in _]\n basename_list = uf.natural_sort(basename_list)\n\n for tag in [\"\", \"_interp\"]: #plots both the raw and interpolated data\n\n print(\"Tag = \", tag)\n\n #makes new directory for plotting\n plot_dir = os.path.join(data_dir,\"vectors_w%sum-min_scale\"%(str(scale_length).replace(\".\",\"p\")) + tag)\n uf.make_dir(plot_dir)\n\n #goes through each time frame\n for i, basename in enumerate(basename_list):\n\n #sets up the plot and plots the image data underneath\n rcParams['axes.linewidth'] = 0\n fig, ax = plt.subplots(figsize=(6,6*ar))\n # ax.patch.set_facecolor('black')\n ax.imshow(stk[i],cmap='Greys_r',extent=(0,width,0,height))\n\n #plots each frame\n print('Grabbing frame:', basename)\n x = np.array(uf.read_file(os.path.join(data_dir, basename + \"_x.dat\")),dtype=float)\n y = np.array(uf.read_file(os.path.join(data_dir, basename + \"_y.dat\")),dtype=float)\n if \"_t0\" in basename:\n U = np.array(uf.read_file(os.path.join(data_dir, basename + \"_u.dat\")),dtype=float)\n V = np.array(uf.read_file(os.path.join(data_dir, basename + \"_v.dat\")),dtype=float)\n else:\n U = np.array(uf.read_file(os.path.join(data_dir, basename + \"_u%s.dat\" % tag)),dtype=float)\n V = np.array(uf.read_file(os.path.join(data_dir, basename + \"_v%s.dat\" % tag)),dtype=float)\n\n #plots vectors with color code according to angle\n phis = np.arctan2(V, U) * 180. 
/ np.pi\n plt.quiver(x, y, U, V, phis.ravel(), cmap='rainbow', clim=(-180., 180.),\n units='xy',scale=scale_factor,scale_units='x',width=vector_width)\n\n #makes an arrow for scale\n cm = LinearSegmentedColormap.from_list('cm', [(1,1,1),(1,1,1)])\n plt.quiver([width-30],[height-height*0.05], [scale_length],[0],[0],cmap=cm,\n units='xy', scale=scale_factor, scale_units='x',width=vector_width)\n\n #finishes plot\n ax.set_xlim(0,width)\n ax.set_ylim(0,height)\n ax.invert_yaxis()\n ax.set_xticks([])\n ax.set_yticks([])\n fig.subplots_adjust(bottom=0,left=0,top=1,right=1)\n\n #saves plot as png\n plt.savefig(plot_dir + '/%s_vectors.png'%basename)\n plt.close()\n\ndef main():\n \"\"\"Sets up the analysis for extracting PIV vectors.\n You should update the image path and pixel size here.\n You should not have to change anything in the rest of the script.\n\n \"\"\"\n\n #sets some initial parameters\n stk_path = './sample_data/tumor_nuclei_small/tumor_nuclei_small.tif'\n px_size = 0.91 #um/px\n scale_factor = 0.004 #for scaling the PIV vectors on the plots\n scale_length = 0.1 #sets the length of the scale vector on the plots (in um/min)\n\n plot_vectors(stk_path,px_size=px_size,scale_factor=scale_factor,scale_length=scale_length)\n\nif __name__==\"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.arctan2", "matplotlib.pyplot.close", "matplotlib.pyplot.quiver", "matplotlib.colors.LinearSegmentedColormap.from_list" ] ]
FreeUKGen/ProbateParsing
[ "dc273ae7516a8a6fa3ad19dbc259cd22f01eb4e2" ]
[ "NER/test_ner.py" ]
[ "'''\nThis script tests the Named Entity Recognition model against the unannotated probate data. The model can be imported to test the remaining entries that are not annotated to extract the entities.\n\nThe input to the script is the folder containing the training model generated during the train_ner.py and a text file containing all the entries that need to be extracted.\n\nTo run the script:\n\npython3 test_ner.py <path_to_model_directory> <path_to_test_data> <destination_path_for_files>\n\nDependencies:\n1. SpaCy :\n pip install -U spacy\n\n Then, download and install a language model so that the semantics for the English language are used while training.\n\n python3 -m spacy download en\n'''\nimport spacy\nimport sys\nimport pandas as pd\nimport os\n\nentities = []\nocr_data = \"\"\n\nmodel_name = sys.argv[1]\ntest_data = sys.argv[2]\noutput_directory = sys.argv[3]\n\nif output_directory is not None:\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n\nmodel_name = model_name + \"/\" if (model_name[-1] is not '/') else model_name\noutput_directory = output_directory + \"/\" if (output_directory[-1] is not '/') else output_directory\n\nwith open(test_data) as myfile:\n directory_data = [x.replace('\\n', ' ') for x in myfile]\n\nprint(\"Loading from\", model_name)\nnlp = spacy.load(model_name) # loading the trained model\nfor entry in directory_data:\n doc = nlp(entry) # finding the ner entities\n entities.append([(ent.text, ent.label_) for ent in doc.ents])\n\ndf = pd.DataFrame() # creating a Pandas Dataframe\ndf['text'] = pd.Series(directory_data)\n\nfor i in range(0, len(entities)):\n labels = entities[i]\n for label in labels:\n entity = label[1]\n if entity.lower() not in df:\n df[entity.lower()] = [[] for _ in range(len(df))]\n df.loc[i][entity.lower()].append(label[0]) # adding entities\n else:\n df.loc[i][entity.lower()].append(label[0])\n\nprint(\"Saved .tsv, .csv and .xlsx files to ---> \" + output_directory)\ndf.to_csv(output_directory + 'trained_ner.tsv', sep='\\t', index=False) # saving the files in different formats\ndf.to_csv(output_directory + 'trained_ner.csv', index=False)\ndf.to_excel(output_directory + 'trained_ner.xlsx', index=False)\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
Deepayan137/Adapting-OCR
[ "0604fe573e58f3d2e918461bdccece3af8b28059" ]
[ "src/modules/trainer.py" ]
[ "import os\nimport logging\nimport numpy as np\nfrom collections import OrderedDict\nfrom argparse import ArgumentParser\nfrom tqdm import *\n\nimport pdb\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, StepLR\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\n\nfrom itertools import chain\nfrom src.utils.utils import AverageMeter, Eval, OCRLabelConverter\nfrom src.optim.optimizer import STLR\nfrom src.utils.utils import gaussian\n\nclass OCRTrainer(object):\n def __init__(self, opt):\n super(OCRTrainer, self).__init__()\n self.data_train = opt.data_train\n self.data_val = opt.data_val\n self.model = opt.model\n self.criterion = opt.criterion\n self.optimizer = opt.optimizer\n self.schedule = opt.schedule\n self.alpha = opt.alpha\n self.converter = OCRLabelConverter(opt.alphabet)\n self.evaluator = Eval()\n print('Scheduling is {}'.format(self.schedule))\n self.scheduler = STLR(self.optimizer, T_max=opt.epochs)\n self.batch_size = opt.batch_size\n self.count = opt.epoch\n self.epochs = opt.epochs\n self.cuda = opt.cuda\n self.collate_fn = opt.collate_fn\n self.noise = opt.noise\n self.init_meters()\n\n def init_meters(self):\n self.avgTrainLoss = AverageMeter(\"Train loss\")\n self.avgTrainCharAccuracy = AverageMeter(\"Train Character Accuracy\")\n self.avgTrainWordAccuracy = AverageMeter(\"Train Word Accuracy\")\n self.avgValLoss = AverageMeter(\"Validation loss\")\n self.avgValCharAccuracy = AverageMeter(\"Validation Character Accuracy\")\n self.avgValWordAccuracy = AverageMeter(\"Validation Word Accuracy\")\n\n def forward(self, x):\n logits = self.model(x)\n return logits.transpose(1, 0)\n\n def loss(self, logits, targets, pred_sizes, target_sizes):\n loss = self.criterion(logits, targets, pred_sizes, target_sizes)\n return loss\n\n def step(self):\n self.max_grad_norm = 0.05\n clip_grad_norm_(self.model.parameters(), self.max_grad_norm)\n self.optimizer.step()\n \n def schedule_lr(self):\n if self.schedule:\n self.scheduler.step()\n\n def mixup_data(self, x, y, lengths, alpha):\n y = self.evaluator.format_target(y, lengths)\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n batch_size = x.size()[0]\n index = torch.randperm(batch_size)\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, [y[i] for i in index]\n lengths_b = torch.LongTensor([lengths[i] for i in index])\n y_a, y_b = torch.LongTensor(torch.LongTensor(list(chain((*y_a))))), \\\n torch.LongTensor(torch.LongTensor(list(chain((*y_b)))))\n return mixed_x, y_a, y_b, lengths, lengths_b, lam\n\n def mixup_criterion(self, logits, y_a, y_b, l_a, l_b, pred_sizes, lam):\n return lam * self.loss(logits, y_a, pred_sizes, l_a) + \\\n (1 - lam) * self.loss(logits, y_b, pred_sizes, l_b)\n\n def _run_batch(self, batch, report_accuracy=False, validation=False):\n input_, targets = batch['img'].cuda(), batch['label']\n targets, lengths = self.converter.encode(targets)\n if not validation:\n if self.noise:\n input_ = gaussian(input_)\n input_, targets_a, targets_b, lengths_a, lengths_b, lam = self.mixup_data(input_, targets, \n lengths, self.alpha)\n else:\n input_, targets_a, targets_b, lengths_a, lengths_b, lam = self.mixup_data(input_, targets, \n lengths, 0)\n logits = self.forward(input_)\n logits = logits.contiguous().cpu()\n logits = torch.nn.functional.log_softmax(logits, 2)\n T, B, H = logits.size()\n pred_sizes = torch.LongTensor([T for i in range(B)])\n targets_a = targets_a.view(-1).contiguous()\n targets_b = 
targets_b.view(-1).contiguous()\n loss = self.mixup_criterion(logits, targets_a, targets_b, lengths_a, lengths_b, pred_sizes, lam)\n if report_accuracy:\n probs, preds = logits.max(2)\n preds = preds.transpose(1, 0).contiguous().view(-1)\n sim_preds = self.converter.decode(preds.data, pred_sizes.data, raw=False)\n ca = np.mean((list(map(self.evaluator.char_accuracy, list(zip(sim_preds, batch['label']))))))\n wa = np.mean((list(map(self.evaluator.word_accuracy, list(zip(sim_preds, batch['label']))))))\n return loss, ca, wa\n\n def run_epoch(self, validation=False):\n if not validation:\n loader = self.train_dataloader()\n pbar = tqdm(loader, desc='Epoch: [%d]/[%d] Training'%(self.count, \n self.epochs), leave=True)\n self.model.train()\n else:\n loader = self.val_dataloader()\n pbar = tqdm(loader, desc='Validating', leave=True)\n self.model.eval()\n outputs = []\n for batch_nb, batch in enumerate(pbar):\n if not validation:\n output = self.training_step(batch)\n else:\n output = self.validation_step(batch)\n # pbar.set_description('%.2f'%output['loss'].item())\n pbar.set_postfix(output)\n outputs.append(output)\n # self.count+=1\n self.schedule_lr()\n if not validation:\n result = self.train_end(outputs)\n else:\n result = self.validation_end(outputs)\n return result\n\n def training_step(self, batch):\n loss, ca, wa = self._run_batch(batch, report_accuracy=True)\n self.optimizer.zero_grad()\n loss.backward()\n self.step()\n output = OrderedDict({\n 'loss': abs(loss.item()),\n 'train_ca': ca.item(),\n 'train_wa': wa.item()\n })\n return output\n\n def validation_step(self, batch):\n loss, ca, wa = self._run_batch(batch, report_accuracy=True, validation=True)\n output = OrderedDict({\n 'val_loss': abs(loss.item()),\n 'val_ca': ca.item(),\n 'val_wa': wa.item()\n })\n return output\n\n def train_dataloader(self):\n # logging.info('training data loader called')\n loader = torch.utils.data.DataLoader(self.data_train,\n batch_size=self.batch_size,\n collate_fn=self.collate_fn,\n shuffle=True,\n num_workers=5)\n return loader\n \n def val_dataloader(self):\n # logging.info('val data loader called')\n loader = torch.utils.data.DataLoader(self.data_val,\n batch_size=self.batch_size,\n collate_fn=self.collate_fn,\n num_workers=5)\n return loader\n\n def train_end(self, outputs):\n for output in outputs:\n self.avgTrainLoss.add(output['loss'])\n self.avgTrainCharAccuracy.add(output['train_ca'])\n self.avgTrainWordAccuracy.add(output['train_wa'])\n\n train_loss_mean = abs(self.avgTrainLoss.compute())\n train_ca_mean = self.avgTrainCharAccuracy.compute()\n train_wa_mean = self.avgTrainWordAccuracy.compute()\n\n result = {'train_loss': train_loss_mean, 'train_ca': train_ca_mean,\n 'train_wa': train_wa_mean}\n # result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': train_loss_mean}\n return result\n\n def validation_end(self, outputs):\n for output in outputs:\n self.avgValLoss.add(output['val_loss'])\n self.avgValCharAccuracy.add(output['val_ca'])\n self.avgValWordAccuracy.add(output['val_wa'])\n\n val_loss_mean = abs(self.avgValLoss.compute())\n val_ca_mean = self.avgValCharAccuracy.compute()\n val_wa_mean = self.avgValWordAccuracy.compute()\n\n result = {'val_loss': val_loss_mean, 'val_ca': val_ca_mean,\n 'val_wa': val_wa_mean}\n # result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}\n return result\n\n\n" ]
[ [ "torch.LongTensor", "numpy.random.beta", "torch.nn.functional.log_softmax", "torch.randperm", "torch.utils.data.DataLoader" ] ]
dadadidodi/m3ddpg
[ "48659721eb634d53c5c73b6b8e32e41b0f1f9a02" ]
[ "experiments/train.py" ]
[ "import argparse\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport pickle\nimport sys\nimport os\n\nsys.path.append('../')\nsys.path.append('../../')\nsys.path.append('../../../')\n\nimport maddpg.common.tf_util as U\nfrom maddpg.trainer.maddpg import MADDPGAgentTrainer\nimport tensorflow.contrib.layers as layers\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Reinforcement Learning experiments for multiagent environments\")\n # Environment\n parser.add_argument(\"--scenario\", type=str, default=\"simple\", help=\"name of the scenario script\")\n parser.add_argument(\"--max-episode-len\", type=int, default=25, help=\"maximum episode length\")\n parser.add_argument(\"--num-episodes\", type=int, default=60000, help=\"number of episodes\")\n parser.add_argument(\"--num-adversaries\", type=int, default=0, help=\"number of adversaries\")\n parser.add_argument(\"--good-policy\", type=str, default=\"maddpg\", help=\"policy for good agents\")\n parser.add_argument(\"--bad-policy\", type=str, default=\"maddpg\", help=\"policy of adversaries\")\n # Core training parameters\n parser.add_argument(\"--lr\", type=float, default=1e-2, help=\"learning rate for Adam optimizer\")\n parser.add_argument(\"--gamma\", type=float, default=0.95, help=\"discount factor\")\n parser.add_argument(\"--batch-size\", type=int, default=1024, help=\"number of episodes to optimize at the same time\")\n parser.add_argument(\"--num-units\", type=int, default=64, help=\"number of units in the mlp\")\n parser.add_argument(\"--adv-eps\", type=float, default=1e-3, help=\"adversarial training rate\")\n parser.add_argument(\"--adv-eps-s\", type=float, default=1e-5, help=\"small adversarial training rate\")\n # Checkpointing\n parser.add_argument(\"--exp-name\", type=str, default=None, help=\"name of the experiment\")\n parser.add_argument(\"--save-dir\", type=str, default=\"/tmp/policy/\", help=\"directory in which training state and model should be saved\")\n parser.add_argument(\"--save-rate\", type=int, default=1000, help=\"save model once every time this many episodes are completed\")\n parser.add_argument(\"--load-name\", type=str, default=\"\", help=\"name of which training state and model are loaded, leave blank to load seperately\")\n parser.add_argument(\"--load-good\", type=str, default=\"\", help=\"which good policy to load\")\n parser.add_argument(\"--load-bad\", type=str, default=\"\", help=\"which bad policy to load\")\n # Evaluation\n parser.add_argument(\"--test\", action=\"store_true\", default=False)\n parser.add_argument(\"--restore\", action=\"store_true\", default=False)\n parser.add_argument(\"--display\", action=\"store_true\", default=False)\n parser.add_argument(\"--benchmark\", action=\"store_true\", default=False)\n parser.add_argument(\"--benchmark-iters\", type=int, default=100000, help=\"number of iterations run for benchmarking\")\n parser.add_argument(\"--benchmark-dir\", type=str, default=\"./benchmark_files/\", help=\"directory where benchmark data is saved\")\n parser.add_argument(\"--plots-dir\", type=str, default=\"./learning_curves/\", help=\"directory where plot data is saved\")\n return parser.parse_args()\n\ndef mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):\n # This model takes as input an observation and returns values of all actions\n with tf.variable_scope(scope, reuse=reuse):\n out = input\n out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, 
num_outputs=num_units, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)\n return out\n\ndef make_env(scenario_name, arglist, benchmark=False):\n from multiagent.environment import MultiAgentEnv\n import multiagent.scenarios as scenarios\n\n # load scenario from script\n scenario = scenarios.load(scenario_name + \".py\").Scenario()\n # create world\n world = scenario.make_world()\n # create multiagent environment\n if benchmark:\n env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)\n else:\n env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)\n return env\n\ndef get_trainers(env, num_adversaries, obs_shape_n, arglist):\n trainers = []\n model = mlp_model\n trainer = MADDPGAgentTrainer\n for i in range(num_adversaries):\n print(\"{} bad agents\".format(i))\n policy_name = arglist.bad_policy\n trainers.append(trainer(\n \"agent_%d\" % i, model, obs_shape_n, env.action_space, i, arglist,\n policy_name == 'ddpg', policy_name, policy_name == 'mmmaddpg'))\n for i in range(num_adversaries, env.n):\n print(\"{} good agents\".format(i))\n policy_name = arglist.good_policy\n trainers.append(trainer(\n \"agent_%d\" % i, model, obs_shape_n, env.action_space, i, arglist,\n policy_name == 'ddpg', policy_name, policy_name == 'mmmaddpg'))\n return trainers\n\n\ndef train(arglist):\n if arglist.test:\n np.random.seed(71)\n with U.single_threaded_session():\n # Create environment\n env = make_env(arglist.scenario, arglist, arglist.benchmark)\n # Create agent trainers\n obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]\n num_adversaries = min(env.n, arglist.num_adversaries)\n trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)\n print('Using good policy {} and bad policy {} with {} adversaries'.format(arglist.good_policy, arglist.bad_policy, num_adversaries))\n\n # Initialize\n U.initialize()\n\n # Load previous results, if necessary\n if arglist.test or arglist.display or arglist.restore or arglist.benchmark:\n if arglist.load_name == \"\":\n # load seperately\n bad_var_list = []\n for i in range(num_adversaries):\n bad_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=trainers[i].scope)\n saver = tf.train.Saver(bad_var_list)\n U.load_state(arglist.load_bad, saver)\n\n good_var_list = []\n for i in range(num_adversaries, env.n):\n good_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=trainers[i].scope)\n saver = tf.train.Saver(good_var_list)\n U.load_state(arglist.load_good, saver)\n else:\n print('Loading previous state from {}'.format(arglist.load_name))\n U.load_state(arglist.load_name)\n\n episode_rewards = [0.0] # sum of rewards for all agents\n agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward\n final_ep_rewards = [] # sum of rewards for training curve\n final_ep_ag_rewards = [] # agent rewards for training curve\n agent_info = [[[]]] # placeholder for benchmarking info\n saver = tf.train.Saver()\n obs_n = env.reset()\n episode_step = 0\n train_step = 0\n t_start = time.time()\n\n print('Starting iterations...')\n while True:\n # get action\n action_n = [agent.action(obs) for agent, obs in zip(trainers,obs_n)]\n # environment step\n new_obs_n, rew_n, done_n, info_n = env.step(action_n)\n episode_step += 1\n done = all(done_n)\n terminal = (episode_step >= arglist.max_episode_len)\n # collect experience\n for i, agent in enumerate(trainers):\n 
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)\n obs_n = new_obs_n\n\n for i, rew in enumerate(rew_n):\n episode_rewards[-1] += rew\n agent_rewards[i][-1] += rew\n\n if done or terminal:\n obs_n = env.reset()\n episode_step = 0\n episode_rewards.append(0)\n for a in agent_rewards:\n a.append(0)\n agent_info.append([[]])\n\n # increment global step counter\n train_step += 1\n\n # for benchmarking learned policies\n if arglist.benchmark:\n for i, info in enumerate(info_n):\n agent_info[-1][i].append(info_n['n'])\n if train_step > arglist.benchmark_iters and (done or terminal):\n file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'\n print('Finished benchmarking, now saving...')\n with open(file_name, 'wb') as fp:\n pickle.dump(agent_info[:-1], fp)\n break\n continue\n\n # for displaying learned policies\n if arglist.display:\n time.sleep(0.1)\n env.render()\n continue\n\n # update all trainers, if not in display or benchmark mode\n if not arglist.test:\n loss = None\n for agent in trainers:\n agent.preupdate()\n for agent in trainers:\n loss = agent.update(trainers, train_step)\n\n # save model, display training output\n if terminal and (len(episode_rewards) % arglist.save_rate == 0):\n U.save_state(arglist.save_dir, global_step = len(episode_rewards), saver=saver)\n # print statement depends on whether or not there are adversaries\n if num_adversaries == 0:\n print(\"steps: {}, episodes: {}, mean episode reward: {}, time: {}\".format(\n train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))\n else:\n print(\"{} vs {} steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}\".format(arglist.bad_policy, arglist.good_policy,\n train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),\n [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))\n t_start = time.time()\n # Keep track of final episode reward\n final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))\n for rew in agent_rewards:\n final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))\n\n # saves final episode reward for plotting training curve later\n if len(episode_rewards) > arglist.num_episodes:\n suffix = '_test.pkl' if arglist.test else '.pkl'\n rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards' + suffix\n agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards' + suffix\n\n if not os.path.exists(os.path.dirname(rew_file_name)):\n try:\n os.makedirs(os.path.dirname(rew_file_name))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n with open(rew_file_name, 'wb') as fp:\n pickle.dump(final_ep_rewards, fp)\n with open(agrew_file_name, 'wb') as fp:\n pickle.dump(final_ep_ag_rewards, fp)\n print('...Finished total of {} episodes.'.format(len(episode_rewards)))\n break\n\nif __name__ == '__main__':\n arglist = parse_args()\n train(arglist)\n" ]
[ [ "numpy.random.seed", "tensorflow.get_collection", "tensorflow.contrib.layers.fully_connected", "numpy.mean", "tensorflow.variable_scope", "tensorflow.train.Saver" ] ]
DandelionLau/NetworkCollections
[ "29e5cd2091f7085b3241209ed9447f2baadbce41" ]
[ "CNN/GoogleNet.py" ]
[ "\"\"\"\n@FileName: GoogleNet.py\n@Description: Implement GoogleNet\n@Author : Ryuk\n@CreateDate: 2019/11/14 16:11\n@LastEditTime: 2019/11/14 16:11\n@LastEditors: Please set LastEditors\n@Version: v1.0\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Inception(nn.Module):\n def __init__(self, in_channels, out_1x1, out_3x3_1, out_n3x3_2, out_5x5_1, out_5x5_2, pool_size):\n super(Inception, self).__init__()\n # 1x1 conv branch\n self.b1 = nn.Sequential(\n nn.Conv2d(in_channels, out_1x1, kernel_size=1),\n nn.BatchNorm2d(out_1x1),\n nn.ReLU(True),\n )\n\n # 1x1 conv -> 3x3 conv branch\n self.b2 = nn.Sequential(\n nn.Conv2d(in_channels, out_3x3_1, kernel_size=1),\n nn.BatchNorm2d(out_3x3_1),\n nn.ReLU(True),\n nn.Conv2d(out_3x3_1, out_n3x3_2, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_n3x3_2),\n nn.ReLU(True),\n )\n\n # 1x1 conv -> 5x5 conv branch\n self.b3 = nn.Sequential(\n nn.Conv2d(in_channels, out_5x5_1, kernel_size=1),\n nn.BatchNorm2d(out_5x5_1),\n nn.ReLU(True),\n nn.Conv2d(out_5x5_1, out_5x5_2, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_5x5_2),\n nn.ReLU(True),\n nn.Conv2d(out_5x5_2, out_5x5_2, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_5x5_2),\n nn.ReLU(True),\n )\n\n # 3x3 pool -> 1x1 conv branch\n self.b4 = nn.Sequential(\n nn.MaxPool2d(3, stride=1, padding=1),\n nn.Conv2d(in_channels, pool_size, kernel_size=1),\n nn.BatchNorm2d(pool_size),\n nn.ReLU(True),\n )\n\n def forward(self, x):\n y1 = self.b1(x)\n y2 = self.b2(x)\n y3 = self.b3(x)\n y4 = self.b4(x)\n return torch.cat([y1,y2,y3,y4], 1)\n\n\nclass Googlenet(nn.Module):\n def __init__(self):\n super(Googlenet, self).__init__()\n self.pre_layers = nn.Sequential(\n nn.Conv2d(3, 192, kernel_size=3, padding=1),\n nn.BatchNorm2d(192),\n nn.ReLU(True),\n )\n\n self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)\n self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)\n\n self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)\n\n self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)\n self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)\n self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)\n self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)\n self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)\n\n self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)\n self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)\n\n self.avgpool = nn.AvgPool2d(8, stride=1)\n self.linear = nn.Linear(1024, 10)\n\n def forward(self, x):\n x = self.pre_layers(x)\n x = self.a3(x)\n x = self.b3(x)\n x = self.maxpool(x)\n x = self.a4(x)\n x = self.b4(x)\n x = self.c4(x)\n x = self.d4(x)\n x = self.e4(x)\n x = self.maxpool(x)\n x = self.a5(x)\n x = self.b5(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n return x\n\n\ndef main():\n x = torch.randn(1, 3, 32, 32)\n net = Googlenet()\n print(net(x))\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
tskTNK/DualSatEarthNav
[ "f61069793e3a8b9991435dcc53d4fe09b6e843fa" ]
[ "gnsspy/position/position.py" ]
[ "\"\"\"\nPosition computation and related functions\n\"\"\"\n# ===========================================================\n# ========================= imports =========================\nimport time\nfrom datetime import timedelta as _timedelta\nimport numpy as _np\nimport pandas as _pd\nfrom operator import itemgetter as _itemgetter\nfrom gnsspy.geodesy.coordinate import _distance_euclidean\nfrom gnsspy.position.atmosphere import tropospheric_delay\nfrom gnsspy.position.satellite import _reception_coord, _sagnac, _azel, _relativistic_clock\nfrom gnsspy.funcs.constants import (_SYSTEM_RNX2, _SYSTEM_RNX3,\n _SYSTEM_NAME, _CLIGHT)\n\nfrom gnsspy.geodesy.coordinate import cart2ell, ell2topo\nimport csv\nimport matplotlib.pyplot as plt\nimport pymap3d as pm\n\n# ===========================================================\n__all__ = [\"spp\",\"multipath\"]\n\ndef mdpo(station, stationR, orbit, system=\"G\", cut_off=7.0):\n\n start = time.time() # Time of start\n debug_out = []\n debug_output = []\n debug_output2 = []\n\n if len(system)>1:\n raise Warning(\"SPP does not support multiple satellite system | This feature will be implemented in the next version\")\n observation_list = _observation_picker(station, system)\n observation_listR = _observation_picker(stationR, system)\n\n print(observation_list)\n print(observation_listR)\n\n gnss = gnssDataframe(station, orbit, system, cut_off)\n gnssR = gnssDataframe(stationR, orbit, system, cut_off)\n\n #-----------------------------------------------------------------------------\n if len(observation_list) >=2:\n carrierPhase1 = getattr(gnss,observation_list[0][2])\n carrierPhase2 = getattr(gnss,observation_list[1][2])\n pseudorange1 = getattr(gnss,observation_list[0][3])\n pseudorange2 = getattr(gnss,observation_list[1][3])\n frequency1 = observation_list[0][4]\n frequency2 = observation_list[1][4]\n carrierPhase1R = getattr(gnssR,observation_listR[0][2])\n carrierPhase2R = getattr(gnssR,observation_listR[1][2])\n pseudorange1R = getattr(gnssR,observation_listR[0][3])\n pseudorange2R = getattr(gnssR,observation_listR[1][3])\n frequency1R = observation_listR[0][4]\n frequency2R = observation_listR[1][4]\n else:\n raise Warning(\"Ionosphere-free combination is not available\")\n # ----------------------------------------------------------------------------\n\n gnss[\"Ionosphere_Free\"] = (frequency1**2*pseudorange1-frequency2**2*pseudorange2)/(frequency1**2-frequency2**2)\n gnss = gnss.dropna(subset = ['Ionosphere_Free'])\n gnss[\"Travel_time\"] = gnss[\"Ionosphere_Free\"] / _CLIGHT\n gnss[\"X_Reception\"],gnss[\"Y_Reception\"],gnss[\"Z_Reception\"] = _reception_coord(gnss.X, gnss.Y, gnss.Z, gnss.Vx, gnss.Vy, gnss.Vz, gnss.Travel_time)\n\n gnssR[\"Ionosphere_Free\"] = (frequency1R**2*pseudorange1R-frequency2R**2*pseudorange2R)/(frequency1R**2-frequency2R**2)\n gnssR = gnssR.dropna(subset = ['Ionosphere_Free'])\n gnssR[\"Travel_time\"] = gnssR[\"Ionosphere_Free\"] / _CLIGHT\n gnssR[\"X_Reception\"],gnssR[\"Y_Reception\"],gnssR[\"Z_Reception\"] = _reception_coord(gnssR.X, gnssR.Y, gnssR.Z, gnssR.Vx, gnssR.Vy, gnssR.Vz, gnssR.Travel_time)\n\n # satList = []\n # satList.append(gnss.index.get_level_values(\"SV\").unique().sort_values())\n # satList.append(gnssR.index.get_level_values(\"SV\").unique().sort_values())\n # print (satList)\n satList = ['G01','G02','G03','G04','G05','G06','G07','G08','G09','G10','G11','G12','G13','G14','G15','G16','G17','G18','G19','G20','G21','G22','G23','G24','G25','G26','G27','G28','G29','G30','G31']\n # satList = 
['G01','G02','G03','G04','G05','G06','G07','G08','G09','G10']\n # satList = ['G10','G23']\n\n for i in range(len(satList)):\n for j in range(i):\n satA = satList[i]\n satB = satList[j]\n\n print (i,j)\n debug_output3 = []\n debug_output4 = []\n\n epochList =gnss.index.get_level_values(\"Epoch\").unique().sort_values()\n epoch_start = epochList[0]\n epochListR =gnssR.index.get_level_values(\"Epoch\").unique().sort_values()\n epoch_startR = epochListR[0]\n\n #-----------------------------------------------------------------------------\n if not epoch_start == epoch_startR:\n raise Warning(\"Start epochs are different\")\n # ----------------------------------------------------------------------------\n\n epoch_offset= _timedelta(seconds=300)\n epoch_interval = _timedelta(seconds=station.interval-0.000001)\n epoch_stop = epochList[-1] + _timedelta(seconds=0.000001)\n\n approx_position = [station.approx_position[0], station.approx_position[1], station.approx_position[2]]\n receiver_clock = station.receiver_clock\n position_list = []\n\n approx_positionR = [stationR.approx_position[0], stationR.approx_position[1], stationR.approx_position[2]]\n receiver_clockR = stationR.receiver_clock\n position_listR = []\n\n approx_positionD = [station.approx_position[0]-stationR.approx_position[0], station.approx_position[1]-stationR.approx_position[1], station.approx_position[2]-stationR.approx_position[2]]\n receiver_clockD = station.receiver_clock - stationR.receiver_clock\n position_listD = []\n\n numOfEpochs = 0\n maxObservationEpochs = 3 # the number of observation epochs used for the MDPO estimation (use -1)\n newtonRaphsonUpdate = 0\n maxNewtonRaphsonUpdate = 1 # max number of NR iterations\n\n coeffMatrixD = _np.zeros([maxObservationEpochs,2])\n coeffMatrixD2 = _np.zeros([maxObservationEpochs,2])\n lMatrixD = _np.zeros([maxObservationEpochs,1])\n\n previous_matchSatelliteList_select = []\n numberOfSatellite = 0\n numOfEpochs = 0\n invalid_Sat_Combination = 0\n\n [lat0, lon0, h0] = pm.ecef2geodetic(approx_positionR[0], approx_positionR[1], approx_positionR[2])\n userENU = pm.ecef2enu(approx_position[0],approx_position[1],approx_position[2], lat0, lon0, h0)\n userENU = _np.array(userENU)\n # print('userENU ini')\n # print(userENU)\n\n while True:\n\n epoch_step = epoch_start + epoch_interval\n\n gnss_temp = gnss.xs((slice(epoch_start,epoch_step))).copy()\n gnss_tempR = gnssR.xs((slice(epoch_start,epoch_step))).copy()\n\n # pick up satellite that are seen from both stations\n satelliteList = gnss_temp.index.get_level_values(\"SV\").unique().sort_values()\n satelliteListR = gnss_tempR.index.get_level_values(\"SV\").unique().sort_values()\n matchSatelliteList = []\n for count in range(len(satelliteList)):\n for count2 in range(len(satelliteListR)):\n if satelliteList[count] == satelliteListR[count2]:\n matchSatelliteList.append(satelliteList[count])\n\n numberOfSatellite = len(matchSatelliteList)\n matchSatelliteList_select = []\n\n # print('matching test')\n matching = 0\n for count in range(len(matchSatelliteList)):\n if matchSatelliteList[count] == satA:\n matching += 1\n if matchSatelliteList[count] == satB:\n matching += 1\n if matching == 2:\n # print('match')\n pickupSatelliteList = [satA,satB]\n gnss_temp2 = gnss_temp.loc[pickupSatelliteList]\n gnss_temp2R = gnss_tempR.loc[pickupSatelliteList]\n matchSatelliteList = pickupSatelliteList\n matchSatelliteList_select = matchSatelliteList\n invalid_Sat_Combination = 0\n else:\n # print('no matching')\n invalid_Sat_Combination = 1\n\n gnss_temp2 = 
gnss_temp.loc[matchSatelliteList]\n gnss_temp2R = gnss_tempR.loc[matchSatelliteList]\n gnss_temp = gnss_temp2\n gnss_tempR = gnss_temp2R\n\n # adding satellite clock bias for debug\n for count in range(len(gnss_temp)):\n gnss_temp.Relativistic_clock[count] = 0.0000002*count\n for count in range(len(gnss_tempR)):\n gnss_tempR.Relativistic_clock[count] = 0.0000002*count\n\n if previous_matchSatelliteList_select != matchSatelliteList_select or invalid_Sat_Combination == 1:\n # print ('skip estimation')\n numOfEpochs = 0\n else:\n gnss_temp2 = gnss_temp.loc[matchSatelliteList_select]\n gnss_temp2R = gnss_tempR.loc[matchSatelliteList_select]\n gnss_temp = gnss_temp2\n gnss_tempR = gnss_temp2R\n\n distance = _distance_euclidean(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Distance\"] = distance + _sagnac(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Azimuth\"], gnss_temp[\"Elevation\"], gnss_temp[\"Zenith\"] = _azel(station.approx_position[0], station.approx_position[1], station.approx_position[2], gnss_temp.X, gnss_temp.Y, gnss_temp.Z, gnss_temp.Distance)\n gnss_temp[\"Tropo\"] = tropospheric_delay(station.approx_position[0],station.approx_position[1],station.approx_position[2], gnss_temp.Elevation, station.epoch)\n\n distanceR = _distance_euclidean(approx_positionR[0],approx_positionR[1],approx_positionR[2], gnss_tempR.X_Reception, gnss_tempR.Y_Reception, gnss_tempR.Z_Reception)\n gnss_tempR[\"Distance\"] = distanceR + _sagnac(approx_positionR[0],approx_positionR[1],approx_positionR[2], gnss_tempR.X_Reception, gnss_tempR.Y_Reception, gnss_tempR.Z_Reception)\n gnss_tempR[\"Azimuth\"], gnss_tempR[\"Elevation\"], gnss_tempR[\"Zenith\"] = _azel(stationR.approx_position[0], stationR.approx_position[1], stationR.approx_position[2], gnss_tempR.X, gnss_tempR.Y, gnss_tempR.Z, gnss_tempR.Distance)\n gnss_tempR[\"Tropo\"] = tropospheric_delay(stationR.approx_position[0],stationR.approx_position[1],stationR.approx_position[2], gnss_tempR.Elevation, stationR.epoch)\n\n sat1 = 0\n sat2 = 1\n\n enuSatX, enuSatY, enuSatZ = pm.ecef2enu(gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception, lat0, lon0, h0)\n enuSatX = _np.array(enuSatX)\n enuSatY = _np.array(enuSatY)\n enuSatZ = _np.array(enuSatZ)\n\n vector_sat1 = [enuSatX[sat1], enuSatY[sat1], enuSatZ[sat1]]\n vector_sat2 = [enuSatX[sat2], enuSatY[sat2], enuSatZ[sat2]]\n unitvector_sat1 = vector_sat1 / _np.linalg.norm(vector_sat1)\n unitvector_sat2 = vector_sat2 / _np.linalg.norm(vector_sat2)\n dot_product = _np.dot(unitvector_sat1,unitvector_sat2)\n angle = _np.degrees(_np.arccos(dot_product))\n el = (gnss_temp.Elevation[sat1]+gnss_temp.Elevation[sat2])/2\n\n coeffMatrixD[numOfEpochs,0] = (userENU[0] - enuSatX[sat1]) / gnss_temp.Distance[sat1] - ((userENU[0] - enuSatX[sat2]) / gnss_temp.Distance[sat2])\n coeffMatrixD[numOfEpochs,1] = (userENU[1] - enuSatY[sat1]) / gnss_temp.Distance[sat1] - ((userENU[1] - enuSatY[sat2]) / gnss_temp.Distance[sat2])\n\n coeffMatrixD2[numOfEpochs,0] = (approx_position[0] - gnss_temp.X_Reception[sat1]) / gnss_temp.Distance[sat1] - ((approx_position[0] - gnss_temp.X_Reception[sat2]) / gnss_temp.Distance[sat2])\n coeffMatrixD2[numOfEpochs,1] = (approx_position[1] - gnss_temp.Y_Reception[sat1]) / gnss_temp.Distance[sat1] - ((approx_position[1] - gnss_temp.Y_Reception[sat2]) / gnss_temp.Distance[sat2])\n\n lMatrix = 
gnss_temp.Ionosphere_Free[sat1] - gnss_temp.Distance[sat1] + _CLIGHT * (gnss_temp.DeltaTSV[sat1] + gnss_temp.Relativistic_clock[sat1] - receiver_clock) - gnss_temp.Tropo[sat1] - (gnss_temp.Ionosphere_Free[sat2] - gnss_temp.Distance[sat2] + _CLIGHT * (gnss_temp.DeltaTSV[sat2] + gnss_temp.Relativistic_clock[sat2] - receiver_clock) - gnss_temp.Tropo[sat2])\n lMatrixR = gnss_tempR.Ionosphere_Free[sat1] - gnss_tempR.Distance[sat1] + _CLIGHT * (gnss_tempR.DeltaTSV[sat1] + gnss_tempR.Relativistic_clock[sat1] - receiver_clockR) - gnss_tempR.Tropo[sat1] - (gnss_tempR.Ionosphere_Free[sat2] - gnss_tempR.Distance[sat2] + _CLIGHT * (gnss_tempR.DeltaTSV[sat2] + gnss_tempR.Relativistic_clock[sat2] - receiver_clockR) - gnss_tempR.Tropo[sat2])\n lMatrixD[numOfEpochs,0] = lMatrix - lMatrixR\n\n numOfEpochs = numOfEpochs + 1\n\n # if newtonRaphsonUpdate == 0:\n # print ('epoch_start')\n # print (epoch_start)\n # print (matchSatelliteList)\n\n if newtonRaphsonUpdate == maxNewtonRaphsonUpdate:\n debug_output3.append([epoch_start, matchSatelliteList_select[sat1], matchSatelliteList_select[sat2], gnss_temp.Azimuth[sat1], gnss_temp.Elevation[sat1], gnss_tempR.Azimuth[sat1], gnss_tempR.Elevation[sat1], gnss_temp.Azimuth[sat2], gnss_temp.Elevation[sat2], gnss_tempR.Azimuth[sat2], gnss_tempR.Elevation[sat2]])\n\n if numOfEpochs > maxObservationEpochs-1:\n\n # DOP Calculation\n GTG = _np.dot(coeffMatrixD.T, coeffMatrixD)\n DOP = _np.linalg.inv(GTG)\n GDOP = (DOP[0][0]+DOP[1][1])**0.5\n XDOP = (DOP[0][0])**0.5\n YDOP = (DOP[1][1])**0.5\n HDOP = (DOP[0][0]+DOP[1][1])**0.5\n\n GTG2 = _np.dot(coeffMatrixD2.T, coeffMatrixD2)\n DOP2 = _np.linalg.inv(GTG2)\n GDOP2 = (DOP2[0][0]+DOP2[1][1])**0.5\n XDOP2 = (DOP2[0][0])**0.5\n YDOP2 = (DOP2[1][1])**0.5\n HDOP2 = (DOP2[0][0]+DOP2[1][1])**0.5\n\n # if newtonRaphsonUpdate == 1:\n # print('newtonRaphsonUpdate == 1')\n # print(XDOP, YDOP, HDOP)\n # print(XDOP2, YDOP2, HDOP2)\n\n if GDOP > 300 and newtonRaphsonUpdate == maxNewtonRaphsonUpdate:\n debug_output4.append(debug_output3[-3])\n debug_output4.append(debug_output3[-2])\n debug_output4.append(debug_output3[-1])\n\n if GDOP > 300 and newtonRaphsonUpdate == maxNewtonRaphsonUpdate:\n # print([epoch_start, matchSatelliteList_select[0], matchSatelliteList_select[1], GDOP, XDOP, YDOP, float(posD[0]), float(posD[1]), float(posD[2]), gnss_temp.Azimuth[0], gnss_temp.Elevation[0], gnss_tempR.Azimuth[0], gnss_tempR.Elevation[0], gnss_temp.Azimuth[1], gnss_temp.Elevation[1], gnss_tempR.Azimuth[1], gnss_tempR.Elevation[1], coeffMatrixD[0,0], coeffMatrixD[0,1], coeffMatrixD[1,0], coeffMatrixD[1,1], coeffMatrixD[2,0], coeffMatrixD[2,1]])\n debug_output2.append([epoch_start, matchSatelliteList_select[0], matchSatelliteList_select[1], GDOP, XDOP, YDOP, float(posD[0]), float(posD[1]), float(posD[2]), angle, el, gnss_temp.Azimuth[0], gnss_temp.Elevation[0], gnss_tempR.Azimuth[0], gnss_tempR.Elevation[0], gnss_temp.Azimuth[1], gnss_temp.Elevation[1], gnss_tempR.Azimuth[1], gnss_tempR.Elevation[1], coeffMatrixD[0,0], coeffMatrixD[0,1], coeffMatrixD[1,0], coeffMatrixD[1,1], coeffMatrixD[2,0], coeffMatrixD[2,1]])\n # debug_output4.append([epoch_start, matchSatelliteList_select[0], matchSatelliteList_select[1], GDOP, XDOP, YDOP, float(posD[0]), float(posD[1]), float(posD[2]), gnss_temp.Azimuth[0], gnss_temp.Elevation[0], gnss_tempR.Azimuth[0], gnss_tempR.Elevation[0], gnss_temp.Azimuth[1], gnss_temp.Elevation[1], gnss_tempR.Azimuth[1], gnss_tempR.Elevation[1], coeffMatrixD[0,0], coeffMatrixD[0,1], coeffMatrixD[1,0], coeffMatrixD[1,1], 
coeffMatrixD[2,0], coeffMatrixD[2,1]])\n\n if GDOP < 1000000000: # dop cut\n\n newtonRaphsonUpdate = newtonRaphsonUpdate + 1\n\n if newtonRaphsonUpdate > maxNewtonRaphsonUpdate:\n newtonRaphsonUpdate = 0\n # print([epoch_start, matchSatelliteList_select[0], matchSatelliteList_select[1], GDOP, XDOP, YDOP, float(posD[0]), float(posD[1]), float(posD[2])])\n debug_output.append([epoch_start, matchSatelliteList_select[0], matchSatelliteList_select[1], GDOP, XDOP, YDOP, float(posD[0]), float(posD[1]), float(posD[2]), angle, el])\n position_listD.append(posD)\n # print(float(posD[0]), float(posD[1]), float(posD[2]), XDOP, YDOP, HDOP, angle, el)\n approx_position = [station.approx_position[0], station.approx_position[1], station.approx_position[2]]\n userENU = pm.ecef2enu(approx_position[0],approx_position[1],approx_position[2], lat0, lon0, h0)\n userENU = _np.array(userENU)\n\n else:\n try:\n linearEquationSolutionD = _np.linalg.lstsq(coeffMatrixD,lMatrixD,rcond=None)\n xMatrixD = linearEquationSolutionD[0]\n # approx_position[0], approx_position[1], approx_position[2] = approx_position[0] + xMatrixD[0], approx_position[1] + xMatrixD[1], approx_position[2]\n # posD = [approx_position[0]-approx_positionR[0], approx_position[1]-approx_positionR[1], approx_position[2]-approx_positionR[2]]\n posD = [userENU[0] + xMatrixD[0], userENU[1] + xMatrixD[1], userENU[2]]\n userENU[0], userENU[1], userENU[2] = posD[0], posD[1], posD[2]\n approx_position[0],approx_position[1],approx_position[2] = pm.enu2ecef(userENU[0], userENU[1], userENU[2], lat0, lon0, h0)\n approx_position = _np.array(approx_position)\n epoch_start -= 3*epoch_offset\n epoch_step -= 3*epoch_offset\n except:\n # print(\"Cannot solve normal equations for epoch\", epoch_start,\"| Skipping...\")\n epoch_start -= 3*epoch_offset\n epoch_step -= 3*epoch_offset\n\n numOfEpochs = 0\n coeffMatrixD = _np.zeros([maxObservationEpochs,2])\n lMatrixD = _np.zeros([maxObservationEpochs,1])\n\n previous_matchSatelliteList_select = matchSatelliteList_select\n\n epoch_start += epoch_offset\n epoch_step += epoch_offset\n if (epoch_step - epoch_stop) > _timedelta(seconds=station.interval):\n break\n\n # skyplot\n az11 = []\n el11 = []\n az12 = []\n el12 = []\n az21 = []\n el21 = []\n az22 = []\n el22 = []\n azh11 = []\n elh11 = []\n azh12 = []\n elh12 = []\n\n if len(debug_output3) > 0:\n\n for id in range(len(debug_output3)):\n az = debug_output3[id][3]\n el = debug_output3[id][4]\n azr = _np.deg2rad(az)\n elr = _np.deg2rad(el)\n if az < 0:\n az = az + 360\n if el < 0:\n el = 0\n az11.append(azr)\n el11.append(el)\n\n for id in range(len(debug_output3)):\n # print(id)\n az = debug_output3[id][7]\n el = debug_output3[id][8]\n azr = _np.deg2rad(az)\n elr = _np.deg2rad(el)\n if az < 0:\n az = az + 360\n if el < 0:\n el = 0\n az12.append(azr)\n el12.append(el)\n\n if len(debug_output4) > 0:\n\n for id in range(len(debug_output4)):\n az = debug_output4[id][3]\n el = debug_output4[id][4]\n azr = _np.deg2rad(az)\n elr = _np.deg2rad(el)\n if az < 0:\n az = az + 360\n if el < 0:\n el = 0\n azh11.append(azr)\n elh11.append(el)\n\n for id in range(len(debug_output4)):\n az = debug_output4[id][7]\n el = debug_output4[id][8]\n azr = _np.deg2rad(az)\n elr = _np.deg2rad(el)\n if az < 0:\n az = az + 360\n if el < 0:\n el = 0\n azh12.append(azr)\n elh12.append(el)\n\n if len(debug_output) > 14:\n\n for id in range(len(debug_output)):\n debug_out.append(debug_output[id])\n debug_output.clear()\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='polar')\n 
ax.set_theta_zero_location('N')\n        ax.set_yticks(_np.arange(0, 91, 15))\n        ax.set_rlim(bottom=90, top=0)\n        ax.scatter(az11, el11, color = 'b')\n        ax.scatter(az12, el12, color = 'r')\n        ax.scatter(azh11, elh11, color = 'g')\n        ax.scatter(azh12, elh12, color = 'g')\n        # plt.show()\n        filename = \"{}{}sky.png\".format(satA, satB)\n        plt.savefig(filename)\n        plt.clf()\n\n        fig = plt.figure()\n        ax = fig.add_subplot(projection='polar')\n        ax.set_theta_zero_location('N')\n        ax.set_yticks(_np.arange(0, 91, 15))\n        ax.set_rlim(bottom=90, top=0)\n        ax.scatter(az11, el11, color = 'b')\n        ax.scatter(az12, el12, color = 'r')\n        # plt.show()\n        filename = \"{}{}sky_raw.png\".format(satA, satB)\n        plt.savefig(filename)\n        plt.clf()\n    else:\n        debug_output.clear()\n\n    # debug out to csv (for every combination of satellites)\n    # fields = ['epoch_start', 'SV1', 'SV2', 'sta1 sat1 AZ', 'sta1 sat1 EL', 'sta2 sat1 AZ', 'sta2 sat1 EL','sta1 sat2 AZ', 'sta1 sat2 EL', 'sta2 sat2 AZ', 'sta2 sat2 EL']\n    # filename = \"data_debug2.csv\"\n    # with open(filename, 'w', newline='') as csvfile:\n    #     csvwriter = csv.writer(csvfile)\n    #     csvwriter.writerow(fields)\n    #     csvwriter.writerows(debug_output3)\n\n    # debug out to csv\n    fields = ['epoch_start', 'SV1', 'SV2', 'GDOP', 'XDOP', 'YDOP', 'pos est X', 'pos est Y', 'pos est Z', 'angle', 'el']\n    filename = \"data.csv\"\n    with open(filename, 'w', newline='') as csvfile:\n        csvwriter = csv.writer(csvfile)\n        csvwriter.writerow(fields)\n        csvwriter.writerows(debug_out)\n\n    fields = ['epoch_start', 'SV1', 'SV2', 'GDOP', 'XDOP', 'YDOP', 'pos est X', 'pos est Y', 'pos est Z', 'angle', 'el', 'sta1 sat1 AZ', 'sta1 sat1 EL', 'sta2 sat1 AZ', 'sta2 sat1 EL','sta1 sat2 AZ', 'sta1 sat2 EL', 'sta2 sat2 AZ', 'sta2 sat2 EL', 'G[0][0]', 'G[0][1]', 'G[1][0]', 'G[1][1]', 'G[2][0]', 'G[2][1]']\n    filename = \"data_debug.csv\"\n    with open(filename, 'w', newline='') as csvfile:\n        csvwriter = csv.writer(csvfile)\n        csvwriter.writerow(fields)\n        csvwriter.writerows(debug_output2)\n\n    x_coordinateD = _np.mean([posD[0] for posD in position_listD])\n    y_coordinateD = _np.mean([posD[1] for posD in position_listD])\n    z_coordinateD = _np.mean([posD[2] for posD in position_listD])\n\n    finish = time.time() # Time of finish\n    print(\"Pseudorange calculation is done in\", \"{0:.2f}\".format(finish-start), \"seconds.\")\n    print (\"\\n\")\n\n    return (x_coordinateD, y_coordinateD, z_coordinateD)\n\ndef dpp(station, stationR, orbit, system=\"G\", cut_off=7.0):\n    start = time.time() # Time of start\n    if len(system)>1:\n        raise Warning(\"SPP does not support multiple satellite system | This feature will be implemented in the next version\")\n    observation_list = _observation_picker(station, system)\n    observation_listR = _observation_picker(stationR, system)\n\n    gnss = gnssDataframe(station, orbit, system, cut_off)\n    gnssR = gnssDataframe(stationR, orbit, system, cut_off)\n\n    #-----------------------------------------------------------------------------\n    if len(observation_list) >=2:\n        carrierPhase1 = getattr(gnss,observation_list[0][2])\n        carrierPhase2 = getattr(gnss,observation_list[1][2])\n        pseudorange1 = getattr(gnss,observation_list[0][3])\n        pseudorange2 = getattr(gnss,observation_list[1][3])\n        frequency1 = observation_list[0][4]\n        frequency2 = observation_list[1][4]\n        carrierPhase1R = getattr(gnssR,observation_listR[0][2])\n        carrierPhase2R = 
getattr(gnssR,observation_listR[1][2])\n pseudorange1R = getattr(gnssR,observation_listR[0][3])\n pseudorange2R = getattr(gnssR,observation_listR[1][3])\n frequency1R = observation_listR[0][4]\n frequency2R = observation_listR[1][4]\n else:\n raise Warning(\"Ionosphere-free combination is not available\")\n # ----------------------------------------------------------------------------\n\n gnss[\"Ionosphere_Free\"] = (frequency1**2*pseudorange1-frequency2**2*pseudorange2)/(frequency1**2-frequency2**2)\n gnss = gnss.dropna(subset = ['Ionosphere_Free'])\n gnss[\"Travel_time\"] = gnss[\"Ionosphere_Free\"] / _CLIGHT\n gnss[\"X_Reception\"],gnss[\"Y_Reception\"],gnss[\"Z_Reception\"] = _reception_coord(gnss.X, gnss.Y, gnss.Z, gnss.Vx, gnss.Vy, gnss.Vz, gnss.Travel_time)\n\n gnssR[\"Ionosphere_Free\"] = (frequency1R**2*pseudorange1R-frequency2R**2*pseudorange2R)/(frequency1R**2-frequency2R**2)\n gnssR = gnssR.dropna(subset = ['Ionosphere_Free'])\n gnssR[\"Travel_time\"] = gnssR[\"Ionosphere_Free\"] / _CLIGHT\n gnssR[\"X_Reception\"],gnssR[\"Y_Reception\"],gnssR[\"Z_Reception\"] = _reception_coord(gnssR.X, gnssR.Y, gnssR.Z, gnssR.Vx, gnssR.Vy, gnssR.Vz, gnssR.Travel_time)\n\n epochList =gnss.index.get_level_values(\"Epoch\").unique().sort_values()\n epoch_start = epochList[0]\n epochListR =gnssR.index.get_level_values(\"Epoch\").unique().sort_values()\n epoch_startR = epochListR[0]\n\n #-----------------------------------------------------------------------------\n if epoch_start == epoch_startR:\n print(\"epochs are same\")\n else:\n raise Warning(\"Start epochs are different\")\n # ----------------------------------------------------------------------------\n\n epoch_offset= _timedelta(seconds=300)\n epoch_interval = _timedelta(seconds=station.interval-0.000001)\n epoch_stop = epochList[-1] + _timedelta(seconds=0.000001)\n\n approx_position = [station.approx_position[0], station.approx_position[1], station.approx_position[2]]\n receiver_clock = station.receiver_clock\n position_list = []\n\n approx_positionR = [stationR.approx_position[0], stationR.approx_position[1], stationR.approx_position[2]]\n receiver_clockR = stationR.receiver_clock\n position_listR = []\n\n approx_positionD = [station.approx_position[0]-stationR.approx_position[0], station.approx_position[1]-stationR.approx_position[1], station.approx_position[2]-stationR.approx_position[2]]\n receiver_clockD = station.receiver_clock - stationR.receiver_clock\n position_listD = []\n\n [lat0, lon0, h0] = pm.ecef2geodetic(approx_positionR[0], approx_positionR[1], approx_positionR[2])\n userENU = pm.ecef2enu(approx_position[0],approx_position[1],approx_position[2], lat0, lon0, h0)\n userENU = _np.array(userENU)\n\n while True:\n\n epoch_step = epoch_start + epoch_interval\n\n gnss_temp = gnss.xs((slice(epoch_start,epoch_step))).copy()\n gnss_tempR = gnssR.xs((slice(epoch_start,epoch_step))).copy()\n\n # pick up satellite that are seen from both stations\n satelliteList = gnss_temp.index.get_level_values(\"SV\").unique().sort_values()\n satelliteListR = gnss_tempR.index.get_level_values(\"SV\").unique().sort_values()\n matchSatelliteList = []\n for count in range(len(satelliteList)):\n for count2 in range(len(satelliteListR)):\n if satelliteList[count] == satelliteListR[count2]:\n matchSatelliteList.append(satelliteList[count])\n\n gnss_temp2 = gnss_temp.loc[matchSatelliteList]\n gnss_temp2R = gnss_tempR.loc[matchSatelliteList]\n gnss_temp = gnss_temp2\n gnss_tempR = gnss_temp2R\n\n # wrong satellite clock for debug\n for count in 
range(len(gnss_temp)):\n gnss_temp.Relativistic_clock[count] = 0.0000002*count\n for count in range(len(gnss_tempR)):\n gnss_tempR.Relativistic_clock[count] = 0.0000002*count\n\n for iter in range(6):\n\n distance = _distance_euclidean(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Distance\"] = distance + _sagnac(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Azimuth\"], gnss_temp[\"Elevation\"], gnss_temp[\"Zenith\"] = _azel(station.approx_position[0], station.approx_position[1], station.approx_position[2], gnss_temp.X, gnss_temp.Y, gnss_temp.Z, gnss_temp.Distance)\n gnss_temp[\"Tropo\"] = tropospheric_delay(station.approx_position[0],station.approx_position[1],station.approx_position[2], gnss_temp.Elevation, station.epoch)\n\n distanceR = _distance_euclidean(approx_positionR[0],approx_positionR[1],approx_positionR[2], gnss_tempR.X_Reception, gnss_tempR.Y_Reception, gnss_tempR.Z_Reception)\n gnss_tempR[\"Distance\"] = distanceR + _sagnac(approx_positionR[0],approx_positionR[1],approx_positionR[2], gnss_tempR.X_Reception, gnss_tempR.Y_Reception, gnss_tempR.Z_Reception)\n gnss_tempR[\"Azimuth\"], gnss_tempR[\"Elevation\"], gnss_tempR[\"Zenith\"] = _azel(stationR.approx_position[0], stationR.approx_position[1], stationR.approx_position[2], gnss_tempR.X, gnss_tempR.Y, gnss_tempR.Z, gnss_tempR.Distance)\n gnss_tempR[\"Tropo\"] = tropospheric_delay(stationR.approx_position[0],stationR.approx_position[1],stationR.approx_position[2], gnss_tempR.Elevation, stationR.epoch)\n\n enuSatX, enuSatY, enuSatZ = pm.ecef2enu(gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception, lat0, lon0, h0)\n enuSatX = _np.array(enuSatX)\n enuSatY = _np.array(enuSatY)\n enuSatZ = _np.array(enuSatZ)\n\n coeffMatrixD = _np.zeros([len(gnss_temp)-1,2])\n\n for count in range(len(gnss_temp)-1):\n coeffMatrixD[count,0] = (userENU[0] - enuSatX[count]) / gnss_temp.Distance[count] - ((userENU[0] - enuSatX[count+1]) / gnss_temp.Distance[count+1])\n coeffMatrixD[count,1] = (userENU[1] - enuSatY[count]) / gnss_temp.Distance[count] - ((userENU[1] - enuSatY[count+1]) / gnss_temp.Distance[count+1])\n # coeffMatrixD[count,2] = (userENU[2] - enuSatZ[count]) / gnss_temp.Distance[count] - ((userENU[2] - enuSatZ[count+1]) / gnss_temp.Distance[count+1])\n\n lMatrix = _np.zeros([len(gnss_temp)-1,1])\n lMatrixR = _np.zeros([len(gnss_temp)-1,1])\n lMatrixD = _np.zeros([len(gnss_temp)-1,1])\n\n for count in range(len(gnss_temp)-1):\n lMatrix[count] = gnss_temp.Ionosphere_Free[count] - gnss_temp.Distance[count] + _CLIGHT * (gnss_temp.DeltaTSV[count] + gnss_temp.Relativistic_clock[count] - receiver_clock) - gnss_temp.Tropo[count] - (gnss_temp.Ionosphere_Free[count+1] - gnss_temp.Distance[count+1] + _CLIGHT * (gnss_temp.DeltaTSV[count+1] + gnss_temp.Relativistic_clock[count+1] - receiver_clock) - gnss_temp.Tropo[count+1])\n lMatrixR[count] = gnss_tempR.Ionosphere_Free[count] - gnss_tempR.Distance[count] + _CLIGHT * (gnss_tempR.DeltaTSV[count] + gnss_tempR.Relativistic_clock[count] - receiver_clockR) - gnss_tempR.Tropo[count] - (gnss_tempR.Ionosphere_Free[count+1] - gnss_tempR.Distance[count+1] + _CLIGHT * (gnss_tempR.DeltaTSV[count+1] + gnss_tempR.Relativistic_clock[count+1] - receiver_clockR) - gnss_tempR.Tropo[count+1])\n\n lMatrixD = lMatrix - lMatrixR\n\n if iter == 0:\n # DOP in NED coordinate\n GTG = _np.dot(coeffMatrixD.T, coeffMatrixD)\n 
DOP = _np.linalg.inv(GTG)\n HDOP = (DOP[0][0]+DOP[1][1])**0.5\n XDOP = (DOP[0][0])**0.5\n YDOP = (DOP[1][1])**0.5\n\n try:\n linearEquationSolutionD = _np.linalg.lstsq(coeffMatrixD,lMatrixD,rcond=None)\n xMatrixD = linearEquationSolutionD[0]\n # posD = [userENU[0] + xMatrixD[0], userENU[1] + xMatrixD[1], userENU[2] + xMatrixD[2]]\n posD = [userENU[0] + xMatrixD[0], userENU[1] + xMatrixD[1], userENU[2]] # 2D est\n userENU[0], userENU[1], userENU[2] = posD[0], posD[1], posD[2]\n approx_position[0],approx_position[1],approx_position[2] = pm.enu2ecef(userENU[0], userENU[1], userENU[2], lat0, lon0, h0)\n approx_position = _np.array(approx_position)\n except:\n print(\"Cannot solve normal equations for epoch\", epoch_start,\"| Skipping...\")\n\n position_listD.append(posD)\n print(float(posD[0]), float(posD[1]), float(posD[2]), XDOP, YDOP, HDOP)\n\n epoch_start += epoch_offset\n epoch_step += epoch_offset\n if (epoch_step - epoch_stop) > _timedelta(seconds=station.interval):\n break\n\n x_coordinateD = _np.mean([posD[0] for posD in position_listD])\n y_coordinateD = _np.mean([posD[1] for posD in position_listD])\n z_coordinateD = _np.mean([posD[2] for posD in position_listD])\n\n finish = time.time() # Time of finish\n # print(\"Pseudorange calculation is done in\", \"{0:.2f}\".format(finish-start), \"seconds.\")\n # print (\"\\n\")\n\n # return (x_coordinate, y_coordinate, z_coordinate, rec_clock, x_coordinateR, y_coordinateR, z_coordinateR, rec_clockR)\n return (x_coordinateD, y_coordinateD, z_coordinateD)\n\ndef spp(station, orbit, system=\"G\", cut_off=7.0):\n start = time.time() # Time of start\n if len(system)>1:\n raise Warning(\"SPP does not support multiple satellite system | This feature will be implemented in the next version\")\n observation_list = _observation_picker(station, system)\n gnss = gnssDataframe(station, orbit, system, cut_off)\n\n #-----------------------------------------------------------------------------\n if len(observation_list) >=2:\n carrierPhase1 = getattr(gnss,observation_list[0][2])\n carrierPhase2 = getattr(gnss,observation_list[1][2])\n pseudorange1 = getattr(gnss,observation_list[0][3])\n pseudorange2 = getattr(gnss,observation_list[1][3])\n frequency1 = observation_list[0][4]\n frequency2 = observation_list[1][4]\n else:\n raise Warning(\"Ionosphere-free combination is not available\")\n # ----------------------------------------------------------------------------\n\n gnss[\"Ionosphere_Free\"] = (frequency1**2*pseudorange1-frequency2**2*pseudorange2)/(frequency1**2-frequency2**2)\n gnss = gnss.dropna(subset = ['Ionosphere_Free'])\n gnss[\"Travel_time\"] = gnss[\"Ionosphere_Free\"] / _CLIGHT\n gnss[\"X_Reception\"],gnss[\"Y_Reception\"],gnss[\"Z_Reception\"] = _reception_coord(gnss.X, gnss.Y, gnss.Z, gnss.Vx, gnss.Vy, gnss.Vz, gnss.Travel_time)\n\n epochList =gnss.index.get_level_values(\"Epoch\").unique().sort_values()\n epoch_start = epochList[0]\n epoch_offset= _timedelta(seconds=300)\n epoch_interval = _timedelta(seconds=station.interval-0.000001)\n epoch_stop = epochList[-1] + _timedelta(seconds=0.000001)\n approx_position = [station.approx_position[0], station.approx_position[1], station.approx_position[2]]\n receiver_clock = station.receiver_clock\n position_list = []\n\n # [lat0, lon0, h0] = pm.ecef2geodetic(-3947764.0793, 3364399.9344, 3699430.4794) # MTKA\n [lat0, lon0, h0] = pm.ecef2geodetic(approx_position[0], approx_position[1], approx_position[2])\n userENU = pm.ecef2enu(approx_position[0],approx_position[1],approx_position[2], lat0, lon0, 
h0)\n userENU = _np.array(userENU)\n # print(approx_position)\n # print(lat0, lon0, h0)\n # print(userENU)\n\n userECFE = pm.enu2ecef(userENU[0],userENU[1],userENU[2], lat0, lon0, h0)\n userECFE = _np.array(userECFE)\n # print(userECFE)\n\n # test rotation matrix converting ECEF to ENU\n # lat0d = _np.radians(lat0)\n # lon0d = _np.radians(lon0)\n # rotationMat1 = [[_np.cos(lon0d),-1*_np.sin(lon0d),0],[_np.sin(lon0d),_np.cos(lon0d),0],[0,0,1]]\n # rotationMat2 = [[_np.cos(-lat0d),0,_np.sin(-lat0d)],[0,1,0],[-1*_np.sin(-lat0d),0,_np.cos(-lat0d)]]\n # rotationMat3 = [[0,0,1],[1,0,0],[0,1,0]]\n # rotationXYZ2ENU = _np.dot(_np.dot(rotationMat1,rotationMat2),rotationMat3)\n # userENU2 = _np.dot(approx_position,rotationXYZ2ENU)\n # userENU2 = _np.array(userENU2)\n # print(userENU2)\n\n while True:\n epoch_step = epoch_start + epoch_interval\n gnss_temp = gnss.xs((slice(epoch_start,epoch_step))).copy()\n\n for iter in range(6):\n\n distance = _distance_euclidean(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Distance\"] = distance + _sagnac(approx_position[0],approx_position[1],approx_position[2], gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception)\n gnss_temp[\"Azimuth\"], gnss_temp[\"Elevation\"], gnss_temp[\"Zenith\"] = _azel(station.approx_position[0], station.approx_position[1], station.approx_position[2], gnss_temp.X, gnss_temp.Y, gnss_temp.Z, gnss_temp.Distance)\n gnss_temp[\"Tropo\"] = tropospheric_delay(station.approx_position[0],station.approx_position[1],station.approx_position[2], gnss_temp.Elevation, station.epoch)\n\n # coeffMatrix = _np.zeros([len(gnss_temp),4])\n # coeffMatrix[:,0] = (approx_position[0] - gnss_temp.X_Reception) / gnss_temp.Distance\n # coeffMatrix[:,1] = (approx_position[1] - gnss_temp.Y_Reception) / gnss_temp.Distance\n # coeffMatrix[:,2] = (approx_position[2] - gnss_temp.Z_Reception) / gnss_temp.Distance\n # coeffMatrix[:,3] = 1\n\n # from observer to target, ECEF => enu\n enuSatX, enuSatY, enuSatZ = pm.ecef2enu(gnss_temp.X_Reception, gnss_temp.Y_Reception, gnss_temp.Z_Reception, lat0, lon0, h0)\n enuSatX = _np.array(enuSatX)\n enuSatY = _np.array(enuSatY)\n enuSatZ = _np.array(enuSatZ)\n\n coeffMatrix = _np.zeros([len(gnss_temp),4])\n coeffMatrix[:,0] = (userENU[0] - enuSatX) / gnss_temp.Distance\n coeffMatrix[:,1] = (userENU[1] - enuSatY) / gnss_temp.Distance\n coeffMatrix[:,2] = (userENU[2] - enuSatZ) / gnss_temp.Distance\n coeffMatrix[:,3] = 1\n\n lMatrix = gnss_temp.Ionosphere_Free - gnss_temp.Distance + _CLIGHT * (gnss_temp.DeltaTSV + gnss_temp.Relativistic_clock - receiver_clock) - gnss_temp.Tropo\n lMatrix = _np.array(lMatrix)\n\n # G = coeffMatrix\n # GTG = _np.dot(G.T, G)\n # GTG_inv = _np.linalg.inv(GTG)\n # GTG_inv_GT = _np.dot(GTG_inv, G.T)\n # xMatrix = _np.dot(GTG_inv_GT,lMatrix)\n #\n # posenu = [userENU[0] + xMatrix[0], userENU[1] + xMatrix[1], userENU[2] + xMatrix[2], receiver_clock + xMatrix[3] / _CLIGHT]\n # userENU[0] = userENU[0] + xMatrix[0]\n # userENU[1] = userENU[1] + xMatrix[1]\n # userENU[2] = userENU[2] + xMatrix[2]\n # receiver_clock = receiver_clock + xMatrix[3] / _CLIGHT\n # approx_position = pm.enu2ecef(userENU[0], userENU[1], userENU[2], lat0, lon0, h0)\n # approx_position = _np.array(approx_position)\n\n if iter == 0:\n # DOP in NED coordinate\n GTG = _np.dot(coeffMatrix.T, coeffMatrix)\n DOP = _np.linalg.inv(GTG)\n HDOP = (DOP[0][0]+DOP[1][1])**0.5\n XDOP = (DOP[0][0])**0.5\n YDOP = (DOP[1][1])**0.5\n TDOP = 
(DOP[3][3])**0.5\n                # print (XDOP, YDOP, HDOP)\n\n            try:\n                linearEquationSolution = _np.linalg.lstsq(coeffMatrix,lMatrix,rcond=None)\n                xMatrix = linearEquationSolution[0]\n                posenu = [userENU[0] + xMatrix[0], userENU[1] + xMatrix[1], userENU[2] + xMatrix[2], receiver_clock + xMatrix[3] / _CLIGHT]\n                userENU[0], userENU[1], userENU[2], receiver_clock = posenu[0], posenu[1], posenu[2], posenu[3]\n                approx_position[0],approx_position[1],approx_position[2] = pm.enu2ecef(userENU[0], userENU[1], userENU[2], lat0, lon0, h0)\n                approx_position = _np.array(approx_position)\n\n            except:\n                print(\"Cannot solve normal equations for epoch\", epoch_start,\"| Skipping...\")\n\n        position_list.append(posenu)\n        print(posenu, XDOP, YDOP, HDOP)\n\n        # [lat, lon, h] = pm.ecef2geodetic(pos[0],pos[1],pos[2])\n        # positionNED = []\n        # positionNED = pm.geodetic2enu(lat, lon, h, lat0, lon0, h0)\n        # print(positionNED)\n\n        # print(pos)\n        epoch_start += epoch_offset\n        epoch_step += epoch_offset\n        if (epoch_step - epoch_stop) > _timedelta(seconds=station.interval):\n            break\n\n    x_coordinate = _np.mean([pos[0] for pos in position_list])\n    y_coordinate = _np.mean([pos[1] for pos in position_list])\n    z_coordinate = _np.mean([pos[2] for pos in position_list])\n    rec_clock = _np.mean([pos[3] for pos in position_list])\n    finish = time.time() # Time of finish\n    print(\"Pseudorange calculation is done in\", \"{0:.2f}\".format(finish-start), \"seconds.\")\n    return (x_coordinate, y_coordinate, z_coordinate, rec_clock)\n\ndef gnssDataframe(station, orbit, system=\"G+R+E+C+J+I+S\", cut_off=7.0):\n    try:\n        system = _itemgetter(*system.split(\"+\"))(_SYSTEM_NAME)\n        if type(system)==str: system = tuple([system])\n    except KeyError:\n        raise Warning(\"Unknown Satellite System:\", system, \"OPTIONS: G-R-E-C-J-R-I-S\")\n    epochMatch = station.observation.index.intersection(orbit.index)\n    gnss = _pd.concat([station.observation.loc[epochMatch].copy(), orbit.loc[epochMatch]], axis=1)\n    gnss = gnss[gnss['SYSTEM'].isin(system)]\n    gnss[\"Distance\"] = _distance_euclidean(station.approx_position[0], station.approx_position[1], station.approx_position[2], gnss.X, gnss.Y, gnss.Z)\n    gnss[\"Relativistic_clock\"] = _relativistic_clock(gnss.X, gnss.Y, gnss.Z, gnss.Vx, gnss.Vy, gnss.Vz)\n    gnss['Azimuth'], gnss['Elevation'], gnss['Zenith'] = _azel(station.approx_position[0], station.approx_position[1], station.approx_position[2], gnss.X, gnss.Y, gnss.Z, gnss.Distance)\n    gnss = gnss.loc[gnss['Elevation'] > cut_off]\n    gnss[\"Tropo\"] = tropospheric_delay(station.approx_position[0],station.approx_position[1],station.approx_position[2], gnss.Elevation, station.epoch)\n    return gnss\n\ndef multipath(station, system=\"G\"):\n    if len(system)>1:\n        raise Warning(\"Multiple satellite system is not applicable for multipath | This feature will be implemented in next version.\")\n    observation_list = _observation_picker(station, system=system)\n    observation = station.observation.dropna(subset=[observation_list[0][2],observation_list[1][2],observation_list[0][3],observation_list[1][3]])\n    observation = observation.loc[observation.SYSTEM==_SYSTEM_NAME[system]].copy(deep=True)\n    carrierPhase1 = getattr(observation,observation_list[0][2])\n    carrierPhase2 = getattr(observation,observation_list[1][2])\n    pseudorange1 = getattr(observation,observation_list[0][3])\n    pseudorange2 = getattr(observation,observation_list[1][3])\n    frequency1 = observation_list[0][4]\n    frequency2 = observation_list[1][4]\n    lam1 = _CLIGHT/frequency1\n    lam2 = _CLIGHT/frequency2\n    ioncoeff = 
(frequency1/frequency2)**2\n    observation[\"Multipath1\"] = pseudorange1 - (2/(ioncoeff-1)+1)*(carrierPhase1*lam1) + (2/(ioncoeff-1))*(carrierPhase2*lam2)\n    observation[\"Multipath2\"] = pseudorange2 - (2*ioncoeff/(ioncoeff-1))*(carrierPhase1*lam1) + (2*ioncoeff/(ioncoeff-1)-1)*(carrierPhase2*lam2)\n    observation = observation.reorder_levels(['SV','Epoch'])\n    observation = observation.sort_index()\n    sv_list = observation.index.get_level_values('SV').unique()\n    # ----------------------------------------------------------------------------\n    Multipath1 = []\n    Multipath2 = []\n    for sv in sv_list:\n        ObsSV = observation.loc[sv]\n        multipathSV1 = []\n        multipathSV2 = []\n        j = 0\n        for i in range(1, len(ObsSV)):\n            if (ObsSV.iloc[i].epoch - ObsSV.iloc[i-1].epoch) > _pd.Timedelta('0 days 00:15:00'):\n                multipath1 = ObsSV.iloc[j:i].Multipath1.values - _np.nanmean(ObsSV.iloc[j:i].Multipath1.values)\n                multipath2 = ObsSV.iloc[j:i].Multipath2.values - _np.nanmean(ObsSV.iloc[j:i].Multipath2.values)\n                multipathSV1.extend(multipath1)\n                multipathSV2.extend(multipath2)\n                j=i\n        multipath1 = ObsSV.iloc[j:].Multipath1.values - _np.nanmean(ObsSV.iloc[j:].Multipath1.values)\n        multipath2 = ObsSV.iloc[j:].Multipath2.values - _np.nanmean(ObsSV.iloc[j:].Multipath2.values)\n        multipathSV1.extend(multipath1)\n        multipathSV2.extend(multipath2)\n        Multipath1.extend(multipathSV1)\n        Multipath2.extend(multipathSV2)\n    # Re-assign multipath values\n    observation[\"Multipath1\"] = Multipath1\n    observation[\"Multipath2\"] = Multipath2\n    return observation\n\ndef _adjustment(coeffMatrix,LMatrix):\n    NMatrix = _np.linalg.inv(_np.dot(_np.transpose(coeffMatrix), coeffMatrix))\n    nMatrix = _np.matmul(_np.transpose(coeffMatrix), LMatrix)\n    XMatrix = _np.dot(NMatrix, nMatrix)\n    vMatrix = _np.dot(coeffMatrix, XMatrix) - LMatrix\n    m0 = _np.sqrt(_np.dot(_np.transpose(vMatrix), vMatrix)/(len(LMatrix)-len(NMatrix)))\n    diagN = _np.diag(NMatrix)\n    rmse = m0*_np.sqrt(diagN)\n    mp = _np.sqrt(rmse[0]**2+rmse[1]**2+rmse[2]**2)\n    return XMatrix, rmse\n\ndef _observation_picker(station, system=\"G\"):\n    try:\n        system = _SYSTEM_NAME[system.upper()]\n    except KeyError:\n        raise Warning(\"Unknown Satellite System:\", system, \"OPTIONS: G-R-E-C-J-R-I-S\")\n    #-------------------------------------------------------------------\n    # RINEX-3\n    if station.version.startswith(\"3\"):\n        observation_codes = station.observation.columns.tolist()\n        system_observations = getattr(station.observation_types, system)\n        band_list = set(\"L\" + code[1] for code in observation_codes if len(code)==3)\n        channel_list = set([code[2] for code in observation_codes if len(code)==3])\n        obs_codes = []\n        for band in band_list:\n            if band in _SYSTEM_RNX3[system]:\n                for channel in channel_list:\n                    if (band+channel) in _SYSTEM_RNX3[system][band][\"Carrierphase\"] and (band+channel) in system_observations:\n                        obs_codes.append([system,band,(band+channel),(\"C\"+band[1]+channel),_SYSTEM_RNX3[system][band][\"Frequency\"]])\n                        break\n    # RINEX-2\n    elif station.version.startswith(\"2\"):\n        observation_codes = station.observation.columns.tolist()\n        system_observations = station.observation_types\n        band_list = set(code for code in observation_codes if code.startswith((\"L\")))\n        obs_codes = []\n        for band in band_list:\n            if band in _SYSTEM_RNX2[system].keys():\n                for code in _SYSTEM_RNX2[system][band][\"Pseudorange\"]:\n                    if code in system_observations:\n                        obs_codes.append([system,band,band,code,_SYSTEM_RNX2[system][band][\"Frequency\"]])\n                        break\n    obs_codes = sorted(obs_codes, key=_itemgetter(1))\n    return 
(obs_codes[0],obs_codes[1])\n\ndef _observation_picker_by_band(station, system=\"G\", band=\"L1\"):\n #-------------------------------------------------------------------\n try:\n system = _SYSTEM_NAME[system.upper()]\n if band not in _SYSTEM_RNX3[system].keys():\n raise Warning(band,\"band cannot be found in\",system,\"satellite system! Band options for\",system,\"system:\",tuple(_SYSTEM_RNX3[system].keys()))\n except KeyError:\n raise Warning(\"Unknown Satellite System:\", system, \"OPTIONS: G-R-E-C-J-R-I-S\")\n #-------------------------------------------------------------------\n\n # RINEX-3\n if station.version.startswith(\"3\"):\n observation_codes = station.observation.columns.tolist()\n system_observations = getattr(station.observation_types, system)\n channel_list = set([code[2] for code in observation_codes if len(code)==3])\n obs_codes = []\n if band in _SYSTEM_RNX3[system]:\n for channel in channel_list:\n if (band+channel) in _SYSTEM_RNX3[system][band][\"Carrierphase\"] and (band+channel) in system_observations:\n obs_codes.append([system,band,(band+channel),(\"C\"+band[1]+channel),_SYSTEM_RNX3[system][band][\"Frequency\"],(\"D\"+band[1]+channel),(\"S\"+band[1]+channel)])\n break\n # RINEX-2\n elif station.version.startswith(\"2\"):\n observation_codes = station.observation.columns.tolist()\n system_observations = station.observation_types\n obs_codes = []\n if band in _SYSTEM_RNX2[system].keys():\n for code in _SYSTEM_RNX2[system][band][\"Pseudorange\"]:\n if code in system_observations:\n obs_codes.append([system,band,band,code,_SYSTEM_RNX2[system][band][\"Frequency\"],(\"D\"+band[1]),(\"S\"+band[1])])\n break\n return (obs_codes[0])\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.linalg.inv", "numpy.arange", "numpy.linalg.norm", "pandas.Timedelta", "matplotlib.pyplot.savefig", "numpy.arccos", "numpy.linalg.lstsq", "numpy.deg2rad", "matplotlib.pyplot.clf", "numpy.mean", "numpy.nanmean", "numpy.transpose", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
rowles/numstore
[ "75e1e00431578042bfa29d92dde476665aec696d" ]
[ "setup.py" ]
[ "from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nfrom distutils import sysconfig\nimport numpy\n\n\nsetup(\n ext_modules=[\n Extension(\n \"numstore\",\n [\n \"numstore.pyx\",\n ],\n language=\"c++\",\n include_dirs=[numpy.get_include()],\n libraries=[\"numstore\"],\n extra_compile_args=[\"-I./include/\", \"-I./ext/\", \"-fPIC\", \"-std=c++2a\"],\n )\n ],\n cmdclass={\"build_ext\": build_ext},\n)\n" ]
[ [ "numpy.get_include" ] ]
fpdcc/fpcc_data_dashboard
[ "575e10a72a85617bd214906822271aa65ecced04" ]
[ "tests/tests.py" ]
[ "import pytest\nimport pandas as pd\nimport datatest as dt\nfrom numbers import Number\n\[email protected](scope='module')\[email protected]_directory(__file__)\ndef df():\n return pd.read_csv('data.csv')\n\[email protected]\ndef test_columns(df):\n dt.validate(\n df.columns,\n {'Id','Category','Sub-Category','FPCC Zone','Project Description','Other','Funded','Priority','New Amenity','Rollover CD','Bond','Grant','2020 New CD Funds','Total 2020 Funds','2021','2022','2023','2024','Total 2021-2024'},\n )\n\n\ndef test_rating(df):\n dt.validate.superset(\n df['Category'],\n {'building improvements', 'capital outlays', 'land improvements', 'planning'},\n )\n\n\ndef test_year(df):\n dt.validate(df['Total 2020 Funds'], Number)\n\n\ndef test_runtime(df):\n dt.validate(df['2021'], Number)\n" ]
[ [ "pandas.read_csv" ] ]
ecarver1/Creation-of-Synthetic-MRI-for-use-in-GBM-tumor-segmentation
[ "147761a0371941742da982ae33404af854fd24ff" ]
[ "UNET/prepare_data.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 14 13:57:44 2018\r\n\r\n@author: ecarver1\r\n\"\"\"\r\n\r\nimport nibabel as nib\r\nimport os\r\nimport numpy\r\nimport gc\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\ndef run3(rootdir,outputdir,post_fix,outputdir_nifti): \r\n img_data = []\r\n print(post_fix)\r\n img_data=run_folder_no_folder_folder(img_data,img_files,rootdir,post_fix,outputdir_nifti)\r\n\r\n numpy.save(outputdir + '/'+post_fix+'.npy', numpy.asarray(img_data,dtype='float32'))\r\n\r\n\r\n# 1. remove empty slices and reduce the number of slices to 128\r\n# 2. pad the image to 256x256x128\r\ndef trim_and_pad(data, file_path,post_fix, output_size=[256,256,64],dt='uint16'):\r\n\r\n input_img = data\r\n\r\n seg_file = file_path.replace(post_fix+'.nii.gz', 'seg.nii.gz')\r\n print(seg_file)\r\n seg_img = nib.nifti1.load(seg_file)\r\n input_imgg = seg_img.get_fdata()\r\n \r\n output_img = numpy.zeros([output_size[0],output_size[1],output_size[2]],dtype=dt)\r\n # remove empty slices from the top\r\n top = input_imgg.shape[2]-1\r\n for k in range(0,input_imgg.shape[2]):\r\n tk = input_imgg.shape[2]-k-1\r\n if numpy.max(input_imgg[:,:,tk]) != 0 or tk == output_size[2]-1:\r\n top = tk\r\n break\r\n print(top)\r\n marx = int((output_size[0]-input_img.shape[0])/2)\r\n mary = int((output_size[1]-input_img.shape[1])/2)\r\n output_img[marx:marx+input_img.shape[0],mary:mary+input_img.shape[1],:] = input_img[:,:,top-output_size[2]+1:top+1]\r\n print(output_img.shape,numpy.mean(output_img))\r\n return output_img,top\r\n\r\n\r\n\r\ndef run_folder_no_folder_folder(img_data,img_files,rootdir,post_fix,outputdir_nifti):\r\n\r\n for root,subFolders,files in os.walk(rootdir):\r\n for file in files:\r\n g=os.path.join(rootdir,file)\r\n\r\n filename, file_extension = os.path.splitext(file)\r\n if file_extension == '.gz':\r\n corename, core_extension = os.path.splitext(filename)\r\n if corename[-len(post_fix):]==post_fix:\r\n if 'N4' not in filename:\r\n if post_fix=='seg':\r\n \r\n img_file = g\r\n print(img_file)\r\n img = nib.nifti1.load(img_file)\r\n data = img.get_fdata()\r\n data,top=trim_and_pad(data, g,post_fix, output_size=[256,256,64],dt='float32')\r\n\r\n\r\n else:\r\n img_file = g\r\n print(img_file)\r\n img = nib.nifti1.load(img_file)\r\n data = img.get_fdata()\r\n\r\n data,top=trim_and_pad(data, g,post_fix, output_size=[256,256,64],dt='float32')\r\n print(data.shape)\r\n data=numpy.asarray(data)\r\n if post_fix in filename:\r\n img_data.append(data)\r\n \r\n else:\r\n continue\r\n \r\n return img_data\r\nif __name__== \"__main__\":\r\n rootdir = '/Brats18_train'\r\n outputdir ='/Brats18_train_output'\r\n outputdir_nifti='/Brats18_train_output'\r\n if not os.path.exists(outputdir):\r\n os.mkdir(outputdir)\r\n if not os.path.exists(outputdir_nifti):\r\n os.mkdir(outputdir_nifti)\r\n img_data=[]\r\n img_files=[]\r\n\r\n run3(rootdir,outputdir,\"t1\",outputdir_nifti)\r\n gc.collect()\r\n run3(rootdir,outputdir,\"t1ce\",outputdir_nifti)\r\n gc.collect()\r\n run3(rootdir,outputdir,\"t2\",outputdir_nifti)\r\n gc.collect()\r\n run3(rootdir,outputdir,\"flair\",outputdir_nifti)\r\n gc.collect()\r\n run3(rootdir,outputdir,\"seg\",outputdir_nifti)\r\n gc.collect() \r\n \r\n\r\n \r\n\r\n \r\n \r\n" ]
[ [ "numpy.asarray", "numpy.max", "numpy.zeros", "numpy.mean" ] ]
moeraza/ali-g
[ "342e24e139fc1e75f4bf576d0784ed886f305cf8" ]
[ "alig/tf/alig.py" ]
[ "try:\n import tensorflow as tf\nexcept ImportError:\n raise ImportError(\"Tensorflow is not installed, impossible to import `alig.tf.AliG`\")\n\n\ndef minimize(optimizer, loss, global_step=None, var_list=None,\n gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP, aggregation_method=None,\n colocate_gradients_with_ops=False, name=None,\n grad_loss=None):\n \"\"\"\n Re-write of tf.train.Optimizer.minimize\n \"\"\"\n # first part of method is identical to tf\n grads_and_vars = optimizer.compute_gradients(\n loss, var_list=var_list, gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss)\n\n vars_with_grad = [v for g, v in grads_and_vars if g is not None]\n if not vars_with_grad:\n raise ValueError(\n \"No gradients provided for any variable, check your graph for ops\"\n \" that do not support gradients, between variables %s and loss %s.\" %\n ([str(v) for _, v in grads_and_vars], loss))\n\n # compute step-size here\n grad_sqrd_norm = sum(tf.norm(grad) ** 2 for grad, _ in grads_and_vars)\n optimizer._learning_rate = loss / (grad_sqrd_norm + optimizer.eps)\n if optimizer._max_lr is not None:\n optimizer._learning_rate = tf.clip_by_value(optimizer._learning_rate, clip_value_min=0,\n clip_value_max=optimizer._max_lr)\n\n return optimizer.apply_gradients(grads_and_vars, global_step=global_step,\n name=name)\n\n\nclass AliGwithMomentum(tf.compat.v1.train.MomentumOptimizer):\n \"\"\"Optimizer that implements the AliG algorithm.\n \"\"\"\n\n def __init__(self, max_lr=None, momentum=0, use_locking=False, name=\"AliG\", eps=1e-5):\n super(AliGwithMomentum, self).__init__(\n learning_rate=None, momentum=momentum, use_locking=use_locking,\n name=name, use_nesterov=True)\n self._max_lr = max_lr\n self.eps = eps\n\n def minimize(self, loss, global_step=None, var_list=None,\n gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP, aggregation_method=None,\n colocate_gradients_with_ops=False, name=None,\n grad_loss=None):\n return minimize(self, loss, global_step=global_step, var_list=var_list,\n gate_gradients=gate_gradients, aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,\n grad_loss=grad_loss)\n\n\nclass AliGwithoutMomentum(tf.compat.v1.train.GradientDescentOptimizer):\n \"\"\"Optimizer that implements the AliG algorithm.\n \"\"\"\n\n def __init__(self, max_lr=None, use_locking=False, name=\"AliG\", eps=1e-5):\n super(AliGwithoutMomentum, self).__init__(\n learning_rate=None, use_locking=use_locking, name=name)\n self._max_lr = max_lr\n self.eps = eps\n\n def minimize(self, loss, global_step=None, var_list=None,\n gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP, aggregation_method=None,\n colocate_gradients_with_ops=False, name=None,\n grad_loss=None):\n return minimize(self, loss, global_step=global_step, var_list=var_list,\n gate_gradients=gate_gradients, aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,\n grad_loss=grad_loss)\n\n\ndef AliG(max_lr=None, momentum=0, use_locking=False, name=\"AliG\", eps=1e-5):\n if momentum < 0:\n raise ValueError(\"Momentum cannot be negative ({})\".format(momentum))\n elif momentum > 0:\n return AliGwithMomentum(max_lr=max_lr, momentum=momentum,\n use_locking=use_locking, name=name, eps=eps)\n else:\n return AliGwithoutMomentum(max_lr=max_lr, use_locking=use_locking, name=name, eps=eps)\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.norm" ] ]
nabeel3133/combining3Dmorphablemodels
[ "76dc6fe290918ea2d14e32a736d3b0e7736f1fbb" ]
[ "Prediction/head_prediction_rand_bfm.py" ]
[ "from scipy.io import loadmat\nimport numpy as np\nimport random\n\ndef tensorToVertices(tensor):\n\tj = 0\n\tvertices = []\n\tfor i in range(0, int(len(tensor)/3)):\n\t\tvertices.append([tensor[j], tensor[j+1], tensor[j+2]])\n\t\tj=j+3\n\n\tvertices = np.reshape(np.array(vertices), (len(vertices),3))\n\treturn (np.array(vertices))\n\ndef write_obj(verts,writeFile,triFile):\n\tfor i in range(0, len(verts)):\n\t\twriteFile.write(\"v \"+str(verts[i][0])+\" \"+str(verts[i][1])+\" \"+str(verts[i][2])+\"\\n\")\n\n\tfor i, line in enumerate(triFile):\n\t\twriteFile.write(line)\n\ndef predictHead(head_mean, head_U, Whf, input_face, face_U, face_mean):\n\t# Equation (6) of the paper\n\tstep1 = input_face - face_mean\n\tstep2 = np.matmul(np.transpose(face_U), step1)\n\tstep3 = np.matmul(Whf, step2)\n\tstep4 = np.matmul(head_U, step3)\n\thead = head_mean + step4\n\treturn head\n\nprint ('Generating Random BFM...')\nface_model_dict = loadmat('../Regression Matrix Calculation/01_MorphableModel.mat')\nface_U = face_model_dict['shapePC']\nface_mean = face_model_dict['shapeMU']\n\nlow = 0\nhigh = np.shape(face_U)[0]\nsize = 1\nrand_row = [low + int(random.random() * (high - low)) for _ in range(size)][0] \n\nshape_parameters = np.reshape(face_U[rand_row][:], (np.shape(face_U)[1],1))\nrand_bfm_tensor = face_mean + np.matmul(face_U,shape_parameters)\n\nrand_bfm_verts = tensorToVertices(rand_bfm_tensor)\n\n\nbfm_file = open('Input_Face.obj','w')\nbfm_tri = open('bfm_tri.txt','r')\nwrite_obj(rand_bfm_verts, bfm_file, bfm_tri)\nbfm_file.close()\nbfm_tri.close()\nprint ('Random BFM generated and saved as \"Input_Face.obj\" file\\n')\n\nprint(\"Predicting Head Shape...\")\n\nhead_model_dict = loadmat('../Regression Matrix Calculation/LYHM_male.mat')\nhead_U = head_model_dict['shp'][0][0][0]\nhead_mean = np.transpose(head_model_dict['shp'][0][0][2])\n\nregression_matrix_dict = loadmat('../Regression Matrix Calculation/Regression_Matrix.mat')\nWhf = regression_matrix_dict['Whf']\n\ninput_face_tensor = rand_bfm_tensor\npred_head_tensor = predictHead(head_mean, head_U, Whf, input_face_tensor, face_U, face_mean) \n\npred_head_verts = tensorToVertices(pred_head_tensor)\n\nhead_file = open('Output_Head.obj','w')\nhead_tri = open('head_tri.txt','r')\nwrite_obj(pred_head_verts, head_file, head_tri)\nhead_file.close()\nhead_tri.close()\n\nprint('Head Shape Prediction Completed and saved as \"Output_Head.obj\" file')\n" ]
[ [ "scipy.io.loadmat", "numpy.matmul", "numpy.shape", "numpy.transpose", "numpy.array" ] ]
ChoiIseungil/korquad-open-cs492i
[ "ddb27f64479acbe8bd98f91744bf6105856297f1" ]
[ "open_squad.py" ]
[ "\"\"\"\nKorQuAD open 형 데이터 processor\n\n본 스크립트는 다음의 파일을 바탕으로 작성 됨\nhttps://github.com/huggingface/transformers/blob/master/src/transformers/data/processors/squad.py\n\n\"\"\"\nfrom soynlp.normalizer import repeat_normalize\nimport re\nimport gensim\nimport json\nimport logging\nimport os\nimport sys\nfrom functools import partial\nfrom multiprocessing import Pool, cpu_count\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom transformers.file_utils import is_tf_available, is_torch_available\nfrom transformers.tokenization_bert import whitespace_tokenize\nfrom transformers.data.processors.utils import DataProcessor\nfrom transformers import ElectraTokenizer\n\nif is_torch_available():\n import torch\n from torch.utils.data import TensorDataset\n\nif is_tf_available():\n import tensorflow as tf\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler(sys.stdout)\nlogger.addHandler(handler)\n\nmodel = gensim.models.Word2Vec.load('ko.bin')\n\ndef soft_max(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\ndef clear(str):\n return str.replace(\" \",\"\").replace(\"#\",\"\")\n\nsub1 = re.compile('[^ .?!/@$%~|0-9|\\x41-\\x7A|ㄱ-ㅣ가-힣]+')\nsub2 = re.compile('[\\s]+')\nsub3 = re.compile('[\\.]+')\n\ntokenizer_distance = ElectraTokenizer.from_pretrained(\n \"monologg/koelectra-base-v2-finetuned-korquad\",\n do_lower_case=None,\n cache_dir=None,\n)\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start: (new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _new_check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n # if len(doc_spans) == 1:\n # return True\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span[\"start\"] + doc_span[\"length\"] - 1\n if position < doc_span[\"start\"]:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span[\"start\"]\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span[\"length\"]\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n\ndef squad_convert_example_to_features(example, max_seq_length, doc_stride, 
max_query_length, is_training):\n features = []\n if is_training and not example.is_impossible:\n # Get start and end position\n start_position = example.start_position\n end_position = example.end_position\n\n # If the answer cannot be found in the text, then skip this example.\n actual_text = \" \".join(example.doc_tokens[start_position: (end_position + 1)])\n cleaned_answer_text = \" \".join(whitespace_tokenize(example.answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n # logger.warning(\"Could not find answer: '%s' vs. '%s'\", actual_text, cleaned_answer_text)\n return []\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text\n )\n\n spans = []\n\n truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length)\n sequence_added_tokens = (\n tokenizer.max_len - tokenizer.max_len_single_sentence + 1\n if \"roberta\" in str(type(tokenizer))\n else tokenizer.max_len - tokenizer.max_len_single_sentence\n )\n sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair\n\n span_doc_tokens = all_doc_tokens\n while len(spans) * doc_stride < len(all_doc_tokens):\n\n encoded_dict = tokenizer.encode_plus(\n truncated_query if tokenizer.padding_side == \"right\" else span_doc_tokens,\n span_doc_tokens if tokenizer.padding_side == \"right\" else truncated_query,\n max_length=max_seq_length,\n return_overflowing_tokens=True,\n pad_to_max_length=True,\n stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,\n truncation_strategy=\"only_second\" if tokenizer.padding_side == \"right\" else \"only_first\",\n )\n\n paragraph_len = min(\n len(all_doc_tokens) - len(spans) * doc_stride,\n max_seq_length - len(truncated_query) - sequence_pair_added_tokens,\n )\n\n if tokenizer.pad_token_id in encoded_dict[\"input_ids\"]:\n non_padded_ids = encoded_dict[\"input_ids\"][: encoded_dict[\"input_ids\"].index(tokenizer.pad_token_id)]\n else:\n non_padded_ids = encoded_dict[\"input_ids\"]\n\n tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)\n\n token_to_orig_map = {}\n for i in range(paragraph_len):\n index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == \"right\" else i\n token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]\n\n encoded_dict[\"paragraph_len\"] = paragraph_len\n encoded_dict[\"tokens\"] = tokens\n encoded_dict[\"token_to_orig_map\"] = token_to_orig_map\n encoded_dict[\"truncated_query_with_special_tokens_length\"] = len(truncated_query) + sequence_added_tokens\n encoded_dict[\"token_is_max_context\"] = {}\n encoded_dict[\"start\"] = len(spans) * doc_stride\n encoded_dict[\"length\"] = paragraph_len\n\n spans.append(encoded_dict)\n\n if \"overflowing_tokens\" not in encoded_dict or (\n \"overflowing_tokens\" in encoded_dict and 
len(encoded_dict[\"overflowing_tokens\"])==0\n ):\n break\n span_doc_tokens = encoded_dict[\"overflowing_tokens\"]\n\n for doc_span_index in range(len(spans)):\n for j in range(spans[doc_span_index][\"paragraph_len\"]):\n is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)\n index = (\n j\n if tokenizer.padding_side == \"left\"\n else spans[doc_span_index][\"truncated_query_with_special_tokens_length\"] + j\n )\n spans[doc_span_index][\"token_is_max_context\"][index] = is_max_context\n\n for span in spans:\n # Identify the position of the CLS token\n cls_index = span[\"input_ids\"].index(tokenizer.cls_token_id)\n\n # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)\n # Original TF implem also keep the classification token (set to 0) (not sure why...)\n p_mask = np.array(span[\"token_type_ids\"])\n\n p_mask = np.minimum(p_mask, 1)\n\n if tokenizer.padding_side == \"right\":\n # Limit positive values to one\n p_mask = 1 - p_mask\n\n p_mask[np.where(np.array(span[\"input_ids\"]) == tokenizer.sep_token_id)[0]] = 1\n\n # Set the CLS index to '0'\n p_mask[cls_index] = 0\n\n span_is_impossible = example.is_impossible\n start_position = 0\n end_position = 0\n if is_training and not span_is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = span[\"start\"]\n doc_end = span[\"start\"] + span[\"length\"] - 1\n out_of_span = False\n\n if not (tok_start_position >= doc_start and tok_end_position <= doc_end):\n out_of_span = True\n\n if out_of_span:\n start_position = cls_index\n end_position = cls_index\n span_is_impossible = True\n else:\n if tokenizer.padding_side == \"left\":\n doc_offset = 0\n else:\n doc_offset = len(truncated_query) + sequence_added_tokens\n\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n features.append(\n SquadFeatures(\n span[\"input_ids\"],\n span[\"attention_mask\"],\n span[\"token_type_ids\"],\n cls_index,\n p_mask.tolist(),\n example_index=0,\n # Can not set unique_id and example_index here. 
They will be set after multiple processing.\n unique_id=0,\n paragraph_len=span[\"paragraph_len\"],\n token_is_max_context=span[\"token_is_max_context\"],\n tokens=span[\"tokens\"],\n token_to_orig_map=span[\"token_to_orig_map\"],\n start_position=start_position,\n end_position=end_position,\n )\n )\n return features\n\n\ndef squad_convert_example_to_features_init(tokenizer_for_convert):\n global tokenizer\n tokenizer = tokenizer_for_convert\n\n\ndef squad_convert_example_to_features_sp(example, max_seq_length, doc_stride, max_query_length, is_training,\n tokenizer_for_convert):\n global tokenizer\n tokenizer = tokenizer_for_convert\n return squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training)\n\n\ndef squad_convert_examples_to_features(\n examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1\n):\n \"\"\"\n Converts a list of examples into a list of features that can be directly given as input to a model.\n It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.\n\n Args:\n examples: list of :class:`~transformers.data.processors.squad.SquadExample`\n tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`\n max_seq_length: The maximum sequence length of the inputs.\n doc_stride: The stride used when the context is too large and is split across several features.\n max_query_length: The maximum length of the query.\n is_training: whether to create features for model evaluation or model training.\n return_dataset: Default False. Either 'pt' or 'tf'.\n if 'pt': returns a torch.data.TensorDataset,\n if 'tf': returns a tf.data.Dataset\n threads: multiple processing threadsa-smi\n\n\n Returns:\n list of :class:`~transformers.data.processors.squad.SquadFeatures`\n\n Example::\n\n processor = SquadV2Processor()\n examples = processor.get_dev_examples(data_dir)\n\n features = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n )\n \"\"\"\n\n # Defining helper methods\n features = []\n threads = min(threads, cpu_count())\n if threads == 1:\n print(\"squad_convert_examples_to_features\")\n features = []\n for eg in tqdm(examples, total=len(examples), desc=\"convert squad examples to features\"):\n feat = squad_convert_example_to_features_sp(\n eg,\n max_seq_length=max_seq_length,\n doc_stride=doc_stride,\n max_query_length=max_query_length,\n is_training=is_training,\n tokenizer_for_convert=tokenizer)\n features.append(feat)\n\n else:\n print(\"squad_convert_examples_to_features w/ {} threads\".format(threads))\n with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:\n annotate_ = partial(\n squad_convert_example_to_features,\n max_seq_length=max_seq_length,\n doc_stride=doc_stride,\n max_query_length=max_query_length,\n is_training=is_training,\n )\n features = list(\n tqdm(\n p.imap(annotate_, examples, chunksize=32),\n total=len(examples),\n desc=\"convert squad examples to features\",\n )\n )\n\n new_features = []\n unique_id = 1000000000\n example_index = 0\n for example_features in tqdm(features, total=len(features), desc=\"add example index and unique id\"):\n if not example_features:\n continue\n for example_feature in example_features:\n example_feature.example_index = example_index\n 
example_feature.unique_id = unique_id\n new_features.append(example_feature)\n unique_id += 1\n example_index += 1\n features = new_features\n del new_features\n if return_dataset == \"pt\":\n if not is_torch_available():\n raise RuntimeError(\"PyTorch must be installed to return a PyTorch dataset.\")\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n\n if not is_training:\n all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask\n )\n else:\n all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)\n all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_masks,\n all_token_type_ids,\n all_start_positions,\n all_end_positions,\n all_cls_index,\n all_p_mask,\n )\n\n return features, dataset\n\n return features\n\n\nclass SquadProcessor(DataProcessor):\n \"\"\"\n Processor for the SQuAD data set.\n Overriden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.\n \"\"\"\n\n train_file = None\n dev_file = None\n\n def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):\n if not evaluate:\n answer = tensor_dict[\"answers\"][\"text\"][0].numpy().decode(\"utf-8\")\n answer_start = tensor_dict[\"answers\"][\"answer_start\"][0].numpy()\n answers = []\n else:\n answers = [\n {\"answer_start\": start.numpy(), \"text\": text.numpy().decode(\"utf-8\")}\n for start, text in zip(tensor_dict[\"answers\"][\"answer_start\"], tensor_dict[\"answers\"][\"text\"])\n ]\n\n answer = None\n answer_start = None\n\n return SquadExample(\n qas_id=tensor_dict[\"id\"].numpy().decode(\"utf-8\"),\n question_text=tensor_dict[\"question\"].numpy().decode(\"utf-8\"),\n context_text=tensor_dict[\"context\"].numpy().decode(\"utf-8\"),\n answer_text=answer,\n start_position_character=answer_start,\n title=tensor_dict[\"title\"].numpy().decode(\"utf-8\"),\n answers=answers,\n )\n\n def get_examples_from_dataset(self, dataset, evaluate=False):\n \"\"\"\n Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset.\n\n Args:\n dataset: The tfds dataset loaded from `tensorflow_datasets.load(\"squad\")`\n evaluate: boolean specifying if in evaluation mode or in training mode\n\n Returns:\n List of SquadExample\n\n Examples::\n\n import tensorflow_datasets as tfds\n dataset = tfds.load(\"squad\")\n\n training_examples = get_examples_from_dataset(dataset, evaluate=False)\n evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)\n \"\"\"\n\n if evaluate:\n dataset = dataset[\"validation\"]\n else:\n dataset = dataset[\"train\"]\n\n examples = []\n for tensor_dict in tqdm(dataset):\n examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))\n\n return examples\n\n def get_train_examples(self, data_dir, filename=None):\n \"\"\"\n Returns the training examples from the data directory.\n\n Args:\n data_dir: 
Directory containing the data files used for training and evaluating.\n filename: None by default, specify this if the training file has a different name than the original one\n which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.\n\n \"\"\"\n if data_dir is None:\n data_dir = \"\"\n\n if self.train_file is None:\n raise ValueError(\"SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor\")\n\n with open(\n os.path.join(data_dir, self.train_file if filename is None else filename), \"r\", encoding=\"utf-8\"\n ) as reader:\n input_data = json.load(reader)[\"data\"]\n return self._create_examples(input_data, \"train\")\n\n def get_eval_examples(self, data_dir, filename=None):\n \"\"\"\n Returns the evaluation example from the data directory.\n\n Args:\n data_dir: Directory containing the data files used for training and evaluating.\n filename: None by default, specify this if the evaluation file has a different name than the original one\n which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.\n \"\"\"\n if data_dir is None:\n data_dir = \"\"\n\n if self.dev_file is None:\n raise ValueError(\"SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor\")\n\n with open(\n os.path.join(data_dir, self.dev_file if filename is None else filename), \"r\", encoding=\"utf-8\"\n ) as reader:\n input_data = json.load(reader)[\"data\"]\n return self._create_examples(input_data, \"dev\")\n\n def _create_examples(self, input_data, set_type):\n is_training = set_type == \"train\"\n examples = []\n\n has_answer_cnt, no_answer_cnt = 0, 0\n for entry in tqdm(input_data[:]):\n temp_examples = []\n distances = []\n qa = entry['qa']\n question_text = qa[\"question\"]\n answer_text = qa['answer']\n\n question_pos_list = tokenizer_distance.tokenize(question_text)\n question_pos_list = list(map(clear, question_pos_list))\n\n if question_text is None or answer_text is None:\n continue\n\n per_qa_paragraph_cnt = 0\n per_qa_unans_paragraph_cnt = 0\n for pi, paragraph in enumerate(entry[\"paragraphs\"]):\n title = paragraph[\"title\"]\n context_text = str(paragraph[\"contents\"])\n context_pos_list = tokenizer_distance.tokenize(context_text)\n context_pos_list = list(map(clear,context_pos_list))\n\n if context_text is None:\n continue\n qas_id = \"{}[SEP]{}[SEP]{}\".format(question_text, answer_text, pi)\n start_position_character = None\n answers = []\n\n # preprocessing\n if is_training:\n # #todo: 일부 Brace로 감싸진 단어 제거\n context_text = re.sub(\"[\\{\\[\\【\\<].*?[\\}\\]\\】\\>]\", \"\", context_text)\n # #todo: preprocessing: 한글, 영어, 띄어쓰기, 일부 특수 문자 등을 제외하고 모두 제거\n context_text = sub1.sub('', context_text) \n context_text = sub2.sub(' ', context_text)\n context_text = sub3.sub('.', context_text)\n # #todo: 반복되는 글자 제거\n context_text = repeat_normalize(context_text, num_repeats=2)\n\n if answer_text not in context_text:\n is_impossible = True\n else:\n is_impossible = False\n \n if not is_impossible:\n if is_training:\n start_position_character = context_text.index(answer_text) # answer[\"answer_start\"]\n else:\n answers = [{\"text\": answer_text,\n \"answer_start\": context_text.index(answer_text)}]\n \n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n context_text=context_text,\n answer_text=answer_text,\n start_position_character=start_position_character,\n title=title,\n is_impossible=is_impossible,\n answers=answers,\n )\n if is_impossible:\n no_answer_cnt += 1\n 
per_qa_unans_paragraph_cnt += 1\n                else:\n                    has_answer_cnt += 1\n\n                if is_impossible and per_qa_unans_paragraph_cnt > 3:\n                    continue\n\n                # todo: How to select training samples considering a memory limit.\n\n                # # vanilla\n                # per_qa_paragraph_cnt += 1\n                # if is_training and per_qa_paragraph_cnt > 3:\n                #     break\n                # examples.append(example)\n                # # vanilla\n\n                # sampling strategy\n                if is_impossible:\n                    examples.append(example)\n                else:\n                    # Score answerable paragraphs by word2vec similarity to the question.\n                    total_distance = 0\n                    for question_pos in question_pos_list:\n                        def get_distance(x):\n                            try:\n                                return model.similarity(x, question_pos)\n                            except KeyError:\n                                return 0\n\n                        temp_list = np.array(list(map(get_distance, context_pos_list)))\n                        # Drop OOV tokens (zero similarity) before averaging.\n                        temp_list = temp_list[temp_list != 0]\n\n                        if len(temp_list) > 0:\n                            total_distance += temp_list.mean()\n\n                    distances.append(total_distance)\n                    temp_examples.append(example)\n\n            # Keep the paragraphs most similar to the question.\n            sorted_index = sorted(range(len(distances)), key=distances.__getitem__)\n            if is_training:\n                number_to_select = min(len(temp_examples), 10)\n                for i in range(number_to_select):\n                    examples.append(temp_examples[sorted_index[-1 * (i + 1)]])\n            else:\n                examples += temp_examples\n            # sampling strategy\n\n        print(\"[{}] Has Answer({}) / No Answer({})\".format(set_type, has_answer_cnt, no_answer_cnt))\n        return examples\n\n\nclass SquadV1Processor(SquadProcessor):\n    train_file = \"train-v1.1.json\"\n    dev_file = \"dev-v1.1.json\"\n\n\nclass SquadV2Processor(SquadProcessor):\n    train_file = \"train_data/korquad_open_train.json\"\n    dev_file = \"train_data/korquad_open_dev.json\"\n    test_file = \"test_data/korquad_open_test.json\"\n\n\nclass SquadExample(object):\n    \"\"\"\n    A single training/test example for the Squad dataset, as loaded from disk.\n\n    Args:\n        qas_id: The example's unique identifier\n        question_text: The question string\n        context_text: The context string\n        answer_text: The answer string\n        start_position_character: The character position of the start of the answer\n        title: The title of the example\n        answers: None by default, this is used during evaluation. 
Holds answers as well as their start positions.\n is_impossible: False by default, set to True if the example has no possible answer.\n \"\"\"\n\n def __init__(\n self,\n qas_id,\n question_text,\n context_text,\n answer_text,\n start_position_character,\n title,\n answers=[],\n is_impossible=False,\n ):\n self.qas_id = qas_id\n self.question_text = question_text\n self.context_text = context_text\n self.answer_text = answer_text\n self.title = title\n self.is_impossible = is_impossible\n self.answers = answers\n\n self.start_position, self.end_position = 0, 0\n\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n\n # Split on whitespace so that different tokens may be attributed to their original position.\n for c in self.context_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n self.doc_tokens = doc_tokens\n self.char_to_word_offset = char_to_word_offset\n\n # Start end end positions only has a value during evaluation.\n if start_position_character is not None and not is_impossible:\n self.start_position = char_to_word_offset[start_position_character]\n self.end_position = char_to_word_offset[\n min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)\n ]\n\n\nclass SquadFeatures(object):\n \"\"\"\n Single squad example features to be fed to a model.\n Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample`\n using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method.\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n token_type_ids: Segment token indices to indicate first and second portions of the inputs.\n cls_index: the index of the CLS token.\n p_mask: Mask identifying tokens that can be answers vs. 
tokens that cannot.\n Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer\n example_index: the index of the example\n unique_id: The unique Feature identifier\n paragraph_len: The length of the context\n token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.\n If a token does not have their maximum context in this feature object, it means that another feature object\n has more information related to that token and should be prioritized over this feature for that token.\n tokens: list of tokens corresponding to the input ids\n token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.\n start_position: start of the answer token index\n end_position: end of the answer token index\n \"\"\"\n\n def __init__(\n self,\n input_ids,\n attention_mask,\n token_type_ids,\n cls_index,\n p_mask,\n example_index,\n unique_id,\n paragraph_len,\n token_is_max_context,\n tokens,\n token_to_orig_map,\n start_position,\n end_position,\n ):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.cls_index = cls_index\n self.p_mask = p_mask\n\n self.example_index = example_index\n self.unique_id = unique_id\n self.paragraph_len = paragraph_len\n self.token_is_max_context = token_is_max_context\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n\n self.start_position = start_position\n self.end_position = end_position\n\n\nclass SquadResult(object):\n \"\"\"\n Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\n\n Args:\n unique_id: The unique identifier corresponding to that example.\n start_logits: The logits corresponding to the start of the answer\n end_logits: The logits corresponding to the end of the answer\n \"\"\"\n\n def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n self.start_logits = start_logits\n self.end_logits = end_logits\n self.unique_id = unique_id\n\n if start_top_index:\n self.start_top_index = start_top_index\n self.end_top_index = end_top_index\n self.cls_logits = cls_logits\n" ]
[ [ "numpy.minimum", "torch.utils.data.TensorDataset", "torch.tensor", "numpy.max", "numpy.array" ] ]
lmmentel/chemtools
[ "3f8f870da52b5d8a76a4be4f99e18cd865adaa7c" ]
[ "tests/test_basisset/test_basisset_json_io.py" ]
[ "\nimport numpy as np\nfrom chemtools.basisset import BasisSet\n\n\nVTZGAUSSIAN = \"\"\"\n****\nBe 0\nS 11 1.00\n 6863.0000000000 0.00023600\n 1030.0000000000 0.00182600\n 234.7000000000 0.00945200\n 66.5600000000 0.03795700\n 21.6900000000 0.11996500\n 7.7340000000 0.28216200\n 2.9160000000 0.42740400\n 1.1300000000 0.26627800\n 0.2577000000 0.01819300\n 0.1101000000 -0.00727500\n 0.0440900000 0.00190300\nS 11 1.00\n 6863.0000000000 -0.00004300\n 1030.0000000000 -0.00033300\n 234.7000000000 -0.00173600\n 66.5600000000 -0.00701200\n 21.6900000000 -0.02312600\n 7.7340000000 -0.05813800\n 2.9160000000 -0.11455600\n 1.1300000000 -0.13590800\n 0.2577000000 0.22802600\n 0.1101000000 0.57744100\n 0.0440900000 0.31787300\nS 1 1.00\n 0.2577000000 1.00000000\nS 1 1.00\n 0.0440900000 1.00000000\nP 5 1.00\n 7.4360000000 0.01073600\n 1.5770000000 0.06285400\n 0.4352000000 0.24818000\n 0.1438000000 0.52369900\n 0.0499400000 0.35342500\nP 1 1.00\n 0.1438000000 1.00000000\nP 1 1.00\n 0.0499400000 1.00000000\nD 1 1.00\n 0.3493000000 1.00000000\nD 1 1.00\n 0.1724000000 1.00000000\nF 1 1.00\n 0.3423000000 1.00000000\n****\n\"\"\"\n\n\ndef ordered(obj):\n if isinstance(obj, dict):\n return sorted((k, ordered(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(ordered(x) for x in obj)\n else:\n return obj\n\n\ndef test_to_json():\n\n bs = BasisSet.from_str(VTZGAUSSIAN, fmt='gaussian', name='VTZ')\n\n bsdumped = bs.to_json(indent=4)\n bsloaded = BasisSet.from_json(bsdumped)\n\n assert bs.name == bsloaded.name, 'inconsistent name'\n assert bs.element == bsloaded.element, 'inconsistent element'\n\n for shell, funs in bs.functions.items():\n\n assert shell in bsloaded.functions.keys(), 'missing shell {}'.format(shell)\n\n assert np.allclose(funs['e'], bsloaded.functions[shell]['e'])\n\n for f1, f2 in zip(funs['cf'], bsloaded.functions[shell]['cf']):\n assert np.allclose(f1['idx'], f2['idx']), 'inconsistent idx'\n assert np.allclose(f1['cc'], f2['cc']), 'inconsistent cc'\n" ]
[ [ "numpy.allclose" ] ]
mostofashakib/TechHundred
[ "e6b0205552e0f524ce2e2585cdf2032a32a2b4b3" ]
[ "vision.py" ]
[ "import face_recognition\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\nvideo_capture = cv2.VideoCapture(0)\n\nadib_image = face_recognition.load_image_file(\"adib.jpg\")\nadib_face_encoding = face_recognition.face_encodings(adib_image)[0]\n\njoe_image = face_recognition.load_image_file(\"joe.jpg\")\njoe_face_encoding = face_recognition.face_encodings(joe_image)[0]\n\npranjal_image = face_recognition.load_image_file(\"pranjal.jpg\")\npranjal_face_encoding = face_recognition.face_encodings(pranjal_image)[0]\n\nknown_face_encodings = [\n adib_face_encoding,\n joe_face_encoding,\n pranjal_face_encoding\n]\nknown_face_names = [\n \"Adib Shakib\",\n \"Joseph Martinez\",\n \"Pranjal Ghimire\"\n]\n\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\nwhile True:\n ret, frame = video_capture.read()\n\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n rgb_small_frame = small_frame[:, :, ::-1]\n\n if process_this_frame:\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.argmin" ] ]
TuomoKareoja/product-type-sales-predictions
[ "1002589fd2b0aec86f93a65d6d5c22f9452bd94a" ]
[ "notebooks/reports/lm_tuning.py" ]
[ "#%%\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport sklearn\nfrom dotenv import find_dotenv, load_dotenv\nfrom IPython.core.interactiveshell import InteractiveShell\nfrom sklearn import metrics, preprocessing\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.linear_model import ElasticNet, ElasticNetCV, LinearRegression\nfrom sklearn.model_selection import KFold, cross_val_predict, cross_val_score\nfrom sklearn.pipeline import make_pipeline\n\nfrom src.visualization.visualize_cv import plot_cv_predictions\n\n# Setting styles\n# %matplotlib inline\nsns.set(style=\"ticks\", color_codes=True)\nInteractiveShell.ast_node_interactivity = \"all\"\n\nseed = 42\n\n#%% # Loading the data\nX_train_path = os.path.join(\"data\", \"processed\", \"X_train.csv\")\ny_train_path = os.path.join(\"data\", \"processed\", \"y_train.csv\")\nX = pd.read_csv(X_train_path)\ny = pd.read_csv(y_train_path, index_col=False)\ny = y.iloc[:, 0]\n\n#%%\n\n# feature selection affected linear model greatly so lets automate this process\n# by using instead of the normal LinearRegression library lets use ElasticNet\n# that combines L1 and L2 regularization. This works as a kind of automatic feature\n# selection\n\n# lets start by optimizing the parameters in crossvalidation. There is a separate\n# function for this package that does this more efficiently\n\nglmnet = ElasticNetCV(cv=70, random_state=seed)\nglmnet.fit(X, y)\nglmnet_best_params = glmnet.get_params()\n\n#%%\n\n# Defining the method for crossvalidation. We crossvalidate each individual row\ncrossvalidation = KFold(n_splits=70, shuffle=True, random_state=seed)\n\n# Defining list of scoring methods\nscoring = [\"neg_mean_squared_error\", \"neg_mean_absolute_error\"]\n\n#%%\n\nglmnet_model = ElasticNet()\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\n\npipelines = []\n\npipelines.append((\"GLMNET\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\n\n#%%\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X,\n y=y,\n crossvalidation=crossvalidation,\n file_suffix=\"optimized\",\n)\n\n# The model is performing quite poorly and the results are still not as good as with the\n# regular lm model with manual (uninformed) variable dropping.\n\n#%%\n\nglmnet_best_params_matching\n\n# We can see that the l1-l2 ratio is 0.5, so a perfect mixture.\n# To get better and more strict variable selection, lets try to force\n# a more strict feature selection by using full lasso regression (only l1 regularization)\n\n#%%\n\nglmnet_model = ElasticNet()\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\n\n# forcing only l1 regularization\nglmnet_best_params_matching[\"l1_ratio\"] = 1\n\npipelines = []\n\npipelines.append((\"Lasso\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\n\n#%%\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X,\n y=y,\n crossvalidation=crossvalidation,\n file_suffix=\"optimized\",\n)\n\n# The model performs now even worse in the outsample even though the insample error is smaller\n\n#%%\n\n# Plotting the volume histogram to see if a transformation could help\nsns.distplot(y, 
bins=20)\n\n#%%\n# Taking the log of the target makes it much more normally distributed\n# this should lessen the effects of extreme values\n\nsns.distplot(np.log(y).clip(lower=0), bins=20)\n\n#%%\n\n# Lets transform y to log and continue with that\n# We need to apply lower 0 because there are some zero values and these would\n# be -inf if we don't do this\n\ny_log = np.log(y).clip(lower=0)\n\n#%%\n\n# lets try to do the same as before but this time with a transformed y-variable\n\n# lets start by optimizing the parameters in crossvalidation.\n\nglmnet = ElasticNetCV(cv=70, random_state=seed)\nglmnet.fit(X, y_log)\nglmnet_best_params = glmnet.get_params()\n\n#%%\n\n# Defining list of scoring methods\nscoring = [\"neg_mean_squared_error\", \"neg_mean_absolute_error\"]\n\nglmnet_model = ElasticNet()\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\n\npipelines = []\n\npipelines.append((\"GLMNET\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X,\n y=y_log,\n crossvalidation=crossvalidation,\n file_suffix=\"optimized_log\",\n transformation=\"exp\",\n round_digits=2,\n)\n\nglmnet_best_params_matching\n\n#%%\n\nglmnet_model = ElasticNet()\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\n\n# forcing only l1 regularization\nglmnet_best_params_matching[\"l1_ratio\"] = 1\n\npipelines = []\n\npipelines.append((\"Lasso\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X,\n y=y_log,\n crossvalidation=crossvalidation,\n file_suffix=\"optimized_log\",\n transformation=\"exp\",\n round_digits=2,\n)\n\nglmnet_best_params_matching\n\n\n#%%\n\n# Both models have one huge outlier in their predictions. Lets try to find out what\n# observation this is\n\npredicted_outsample = cross_val_predict(pipelines[0][1], X, y_log, cv=crossvalidation)\n\ntest = list(np.exp(y_log) - np.exp(predicted_outsample))\nX.iloc[test.index(min(test))]\ny.iloc[test.index(min(test))]\n\n# It is an accessory with lots of good reviews and excellent sales.\n# Seems like a legit observations. 
Good sales and even better reviews.\n\n#%%\n\n# Trying out a recursive feature elimination with crossvalidation\n# and running these variables trough the glmnet optimization and just normal linear model\n\nestimator = LinearRegression()\nscoring = \"neg_mean_absolute_error\"\nrfecv_selector = RFECV(estimator, step=1, cv=crossvalidation, scoring=scoring)\nrfecv_selector = rfecv_selector.fit(X, y)\n\nX.columns\nX.columns[rfecv_selector.support_]\nrfecv_selector.grid_scores_\nrfecv_selector.support_\n\nX_rfe = X.iloc[:, rfecv_selector.support_]\n\nglmnet = ElasticNetCV(cv=crossvalidation, random_state=seed)\nglmnet = glmnet.fit(X_rfe, y)\nglmnet_best_params = glmnet.get_params()\n\n# Defining list of scoring methods\nscoring = [\"neg_mean_squared_error\", \"neg_mean_absolute_error\"]\n\nglmnet_model = ElasticNet()\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\n\npipelines = []\n\npipelines.append((\"GLMNET\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\n\npipelines.append((\"LM\", make_pipeline(LinearRegression())))\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X_rfe,\n y=y,\n crossvalidation=crossvalidation,\n file_suffix=\"optimized_rfe\",\n)\n\nglmnet_best_params_matching\n\n# Now the model performs better than any of our manual efforts for feature selection\n# with linear models. We can also see that the glmnet performs bit better than the simple\n# linear model even after we have done aggressive feature selection.\n\n#%%\n\n# lets build and tune a final linear model. We use the rfe feature selection with\n# crossvalidated glmnet with a the l1 ratio decided by this process\n\nestimator = LinearRegression()\nscoring = \"neg_mean_absolute_error\"\nrfecv_selector = RFECV(estimator, step=1, cv=crossvalidation, scoring=scoring)\nrfecv_selector = rfecv_selector.fit(X, y)\n\nX_rfe = X.iloc[:, rfecv_selector.support_]\n\nglmnet = ElasticNetCV(cv=crossvalidation, random_state=seed)\nglmnet = glmnet.fit(X_rfe, y)\nglmnet_best_params = glmnet.get_params()\n\n# Defining list of scoring methods\nscoring = [\"neg_mean_squared_error\", \"neg_mean_absolute_error\"]\n\nglmnet_model = ElasticNet(random_state=seed)\nglmnet_best_params_matching = {\n key: glmnet_best_params[key]\n for key in glmnet_model.get_params().keys()\n if key in glmnet_best_params\n}\n\n# manual tuning so that things work\nglmnet_best_params_matching[\"precompute\"] = False\nglmnet_best_params_matching\n\npipelines = []\n\npipelines.append((\"GLMNET\", make_pipeline(ElasticNet(**glmnet_best_params_matching))))\npipelines.append((\"LM\", make_pipeline(LinearRegression())))\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X_rfe,\n y=y,\n crossvalidation=crossvalidation,\n file_suffix=\"best_rfe\",\n)\n\n# This makes the model perform clearly better and interestingly glmnet still\n# performs better than linear regression even though we already did aggressive\n# feature selection so the regularization should help less\n\n#%%\n\n# Checking which columns were used\n\nX_rfe.columns\n\n# [\n# \"rew_4star\",\n# \"rew_3star\",\n# \"rew_2star\",\n# \"negative_service_review\",\n# \"recommend_product\",\n# \"width\",\n# \"profit_margin\",\n# \"product_type_Accessories\",\n# \"product_type_GameConsole\",\n# \"product_type_Laptop\",\n# \"product_type_Netbook\",\n# \"product_type_PC\",\n# \"product_type_Printer\",\n# 
\"product_type_PrinterSupplies\",\n# \"product_type_Smartphone\",\n# \"product_type_Software\",\n# \"product_type_Tablet\",\n# ]\n\n# Interestingly all product types were retained\n\n#%%\n\n# Trying how the model handels when we limit the predictions to only interesting product types\n\nis_pc = X.product_type_PC == 1\nis_laptop = X.product_type_Laptop == 1\nis_netbook = X.product_type_Netbook == 1\nis_smartphone = X.product_type_Smartphone == 1\n\nlimited_cat = ((is_pc) | (is_laptop) | (is_netbook) | (is_smartphone)).to_list()\n\nplot_cv_predictions(\n pipelines=pipelines,\n X=X_rfe,\n y=y,\n crossvalidation=crossvalidation,\n file_suffix=\"best_rfe_lim_pred\",\n limited_pred_mask=limited_cat,\n)\n\n# Looks quite bad as the some of the predictions are actually negative\n\n#%%\n\n# Testing how things change if we use only the relevant product types also for training\n# We don't bother to try to do hyperparameter optimization for this dataset\n# as it is just too small to do this reliably\n\n\n# Dropping rows that are not interesting for our predictions\nis_pc = X.product_type_PC == 1\nis_laptop = X.product_type_Laptop == 1\nis_netbook = X.product_type_Netbook == 1\nis_smartphone = X.product_type_Smartphone == 1\n\ny_lim_pred = y[(is_pc) | (is_laptop) | (is_netbook) | (is_smartphone)]\nX_lim_pred = X_rfe[(is_pc) | (is_laptop) | (is_netbook) | (is_smartphone)]\n\n# We need to define our crossvalidation again to match the more limited number\n# of observations\ncrossvalidation = KFold(n_splits=13, shuffle=True, random_state=seed)\n\npath_figures = os.path.join(\"reports\", \"figures\")\n\nfor name, pipeline in pipelines:\n predicted_outsample = cross_val_predict(\n pipeline, X_lim_pred, y_lim_pred, cv=crossvalidation\n )\n predicted_insample = pipeline.fit(X_lim_pred, y_lim_pred).predict(X_lim_pred)\n fig, ax = plt.subplots()\n ax.scatter(y_lim_pred, predicted_outsample, c=\"r\", marker=\"+\", label=\"outsample\")\n ax.scatter(y_lim_pred, predicted_insample, c=\"b\", marker=\"x\", label=\"insample\")\n ax.plot(\n [y_lim_pred.min(), y_lim_pred.max()],\n [y_lim_pred.min(), y_lim_pred.max()],\n \"k--\",\n lw=3,\n )\n model_mean_absolute_error_outsample = round(\n metrics.mean_absolute_error(y_lim_pred, predicted_outsample), 0\n )\n model_mean_absolute_error_insample = round(\n metrics.mean_absolute_error(y_lim_pred, predicted_insample), 0\n )\n plt.text(\n 0.05,\n 0.9,\n \"Mean absolute error (outsample): \" + str(model_mean_absolute_error_outsample),\n transform=ax.transAxes,\n )\n plt.text(\n 0.05,\n 0.8,\n \"Mean absolute error (insample): \" + str(model_mean_absolute_error_insample),\n transform=ax.transAxes,\n )\n ax.set_xlabel(\"Actual Volume\")\n ax.set_ylabel(\"Predicted Volume\")\n plt.legend(loc=4)\n fig.suptitle(name + \": \" + \"Predicted vs Actual\")\n plot_title = (\n \"predictions_\" + \"best_ref_lim_pred_train\" + \"_\" + name.lower() + \".png\"\n )\n plt.savefig(os.path.join(path_figures, plot_title))\n plt.show()\n plt.clf()\n\n\n# models are much worse than they were with using bigger dataset\n\n#%%\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "pandas.read_csv", "sklearn.model_selection.cross_val_predict", "sklearn.linear_model.ElasticNet", "sklearn.metrics.mean_absolute_error", "sklearn.linear_model.ElasticNetCV", "matplotlib.pyplot.subplots", "sklearn.model_selection.KFold", "matplotlib.pyplot.clf", "sklearn.linear_model.LinearRegression", "numpy.exp", "sklearn.feature_selection.RFECV", "matplotlib.pyplot.show" ] ]
RobertTLange/code-and-blog
[ "a727390b8668dbf7352759220666548e2d1a0e2d" ]
[ "01_dual_number_ad/dual_ad_logistic_reg.py" ]
[ "import numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\nclass DataLoader(object):\n # Small class object for generating synthetic data\n def __init__(self, n, d, batch_size, binary=False):\n self.total_dim = d + 1\n self.X, self.y = self.generate_regression_data(n, d, binary)\n # Set batch_id for different indices\n self.num_batches = np.ceil(n/batch_size).astype(int)\n self.batch_ids = np.array([np.repeat(i, batch_size) for i in range(self.num_batches)]).flatten()[:n]\n\n def generate_regression_data(self, n, d, binary=False):\n # Generate the regression/classification data from Gaussians\n self.b_true = self.generate_coefficients(d)\n X = np.random.normal(0, 1, n*d).reshape((n, d))\n noise = np.random.normal(0, 1, n).reshape((n, 1))\n inter = np.ones(n).reshape((n, 1))\n X = np.hstack((inter, X))\n y = np.matmul(X, self.b_true) + noise\n # Make data binary if task is classification/logistic regression\n if binary:\n y[y > 0] = 1\n y[y <= 0] = 0\n return X, y\n\n def generate_coefficients(self, d, intercept=True):\n # Generate random integer-valued coefficients for Xb + e\n b_random = np.random.randint(-5, 5, d + intercept)\n return b_random.reshape((d + intercept, 1))\n\n def shuffle_arrays(self):\n # Shuffle the dataset for diversity when looping over batches\n assert len(self.X) == len(self.y)\n p = np.random.permutation(len(self.X))\n self.X, self.y = self.X[p], self.y[p]\n\n def get_batch_idx(self, batch_id):\n # Subselect the current batch to be processed!\n idx = np.where(self.batch_ids == batch_id)[0]\n return self.X[idx, :], self.y[idx].flatten()\n\nclass DualTensor(object):\n # Class object for dual representation of a tensor/matrix/vector\n def __init__(self, real, dual):\n self.real = real\n self.dual = dual\n\n def zero_grad(self):\n # Reset the gradient for the next batch evaluation\n dual_part = np.zeros((len(self.real), len(self.real)))\n np.fill_diagonal(dual_part, 1)\n self.dual = dual_part\n return\n\ndef dot_product(b_dual, x, both_require_grad=False):\n # Function to perform dot product between a dual and a no grad_req vector\n real_part = np.dot(x.real, b_dual.real)\n dual_part = np.dot(x.real, b_dual.dual)\n if both_require_grad:\n dual_part += np.dot(b_dual.real, x.dual)\n return DualTensor(real_part, dual_part)\n\ndef add_duals(dual_a, dual_b):\n # Operator non-\"overload\": Add a two dual numbers\n real_part = dual_a.real + dual_b.real\n dual_part = dual_a.dual + dual_b.dual\n return DualTensor(real_part, dual_part)\n\ndef log(dual_tensor):\n # Operator non-\"overload\": Log (real) & its derivative (dual)\n real_part = np.log(dual_tensor.real)\n temp_1 = 1/dual_tensor.real\n # Fill matrix with diagonal entries of log derivative\n temp_2 = np.zeros((temp_1.shape[0], temp_1.shape[0]))\n np.fill_diagonal(temp_2, temp_1)\n dual_part = np.dot(temp_2, dual_tensor.dual)\n return DualTensor(real_part, dual_part)\n\ndef sigmoid(dual_tensor):\n # Operator non-\"overload\": Sigmoid (real) & its derivative (dual)\n real_part = 1/(1+np.exp(-dual_tensor.real))\n temp_1 = np.multiply(real_part, 1-real_part)\n # Fill matrix with diagonal entries of sigmoid derivative\n temp_2 = np.zeros((temp_1.shape[0], temp_1.shape[0]))\n np.fill_diagonal(temp_2, temp_1)\n dual_part = np.dot(temp_2, dual_tensor.dual)\n return DualTensor(real_part, dual_part)\n\ndef forward(X, b_dual):\n # Apply element-wise sigmoid activation\n y_pred_1 = sigmoid(dot_product(b_dual, X))\n y_pred_2 = DualTensor(1-y_pred_1.real, -y_pred_1.dual)\n # Make numerically stable!\n y_pred_1.real = 
np.clip(y_pred_1.real, 1e-15, 1-1e-15)\n y_pred_2.real = np.clip(y_pred_2.real, 1e-15, 1-1e-15)\n return y_pred_1, y_pred_2\n\ndef binary_cross_entropy_dual(y_true, y_pred_1, y_pred_2):\n # Compute actual binary cross-entropy term\n log_y_pred_1, log_y_pred_2 = log(y_pred_1), log(y_pred_2)\n bce_l1, bce_l2 = dot_product(log_y_pred_1, -y_true), dot_product(log_y_pred_2, -(1 - y_true))\n bce = add_duals(bce_l1, bce_l2)\n # Calculate the batch classification accuracy\n acc = (y_true == (y_pred_1.real > 0.5)).sum()/y_true.shape[0]\n return bce, acc\n\ndef train_logistic_regression(n, d, n_epoch, batch_size, b_init, l_rate):\n # Generate the data for a coefficient vector & init progress tracker!\n data_loader = DataLoader(n, d, batch_size, binary=True)\n b_hist, func_val_hist, param_error, acc_hist = [], [], [], []\n\n # Get the coefficients as solution to optimized sklearn function\n logreg = LogisticRegression(penalty='none', solver='lbfgs', multi_class='multinomial')\n logreg.fit(data_loader.X, data_loader.y)\n norm_coeff = np.linalg.norm(logreg.coef_.ravel())\n\n b_dual = DualTensor(b_init, None)\n\n # Start running the training loop\n for epoch in range(n_epoch):\n # Shuffle the batch identities at beginning of each epoch\n data_loader.shuffle_arrays()\n for batch_id in range(data_loader.num_batches):\n # Clear the gradient\n b_dual.zero_grad()\n # Select the current batch & perform \"mini-forward\" pass\n X, y = data_loader.get_batch_idx(batch_id)\n y_pred_1, y_pred_2 = forward(X, b_dual)\n # Calculate the forward AD - real = func, dual = deriv\n current_dual, acc = binary_cross_entropy_dual(y, y_pred_1, y_pred_2)\n # Perform grad step & append results to the placeholder list\n b_dual.real -= l_rate*np.array(current_dual.dual).flatten()\n b_hist.append(b_dual.real)\n func_val_hist.append(current_dual.real)\n param_error.append(np.linalg.norm(logreg.coef_.ravel() - b_hist[-1])/norm_coeff)\n acc_hist.append(acc)\n\n if len(param_error) > 1 and np.abs(param_error[-1] - param_error[-2]) < 0.00001:\n break\n\n if epoch % 1 == 0:\n print(\"Accuracy: {} | Euclidean Param Norm: {} | fct min: {}\".format(acc, param_error[-1], current_dual.real))\n return b_hist, func_val_hist, param_error, acc_hist\n\nif __name__ == \"__main__\":\n np.random.seed(1)\n b, f, error, ac = train_logistic_regression(1000, 4, 40, 100, np.array([0, 0, 0, 0, 0]).astype(float), 0.005)\n" ]
[ [ "numpy.dot", "numpy.fill_diagonal", "numpy.exp", "numpy.where", "numpy.random.randint", "numpy.hstack", "numpy.clip", "numpy.matmul", "numpy.ceil", "numpy.repeat", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.array", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "numpy.abs", "numpy.ones", "numpy.random.normal" ] ]
SpontaneousDuck/finn
[ "7cdfd6271159c6cc4636bd33047a7f2e175a7390" ]
[ "tests/brevitas/test_brevitas_act_export.py" ]
[ "import numpy as np\nimport torch\nimport brevitas.onnx as bo\nfrom brevitas.nn import QuantHardTanh\nfrom brevitas.core.restrict_val import RestrictValueType\nfrom brevitas.core.scaling import ScalingImplType\nfrom models.common import get_quant_type\nimport pytest\nfrom finn.core.modelwrapper import ModelWrapper\nimport finn.core.onnx_exec as oxe\nfrom finn.transformation.infer_shapes import InferShapes\n\nexport_onnx_path = \"test_act.onnx\"\n\n\[email protected](\"abits\", [1, 2, 4, 8])\[email protected](\"narrow_range\", [False, True])\[email protected](\"max_val\", [1.0, 1 - 2 ** (-7)])\ndef test_brevitas_act_export(abits, narrow_range, max_val):\n act_quant_type = get_quant_type(abits)\n min_val = -1.0\n ishape = (1, 10)\n b_act = QuantHardTanh(\n bit_width=abits,\n quant_type=act_quant_type,\n max_val=max_val,\n min_val=min_val,\n restrict_scaling_type=RestrictValueType.LOG_FP,\n scaling_impl_type=ScalingImplType.CONST,\n narrow_range=narrow_range,\n )\n bo.export_finn_onnx(b_act, ishape, export_onnx_path)\n model = ModelWrapper(export_onnx_path)\n model = model.transform(InferShapes())\n inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(\n np.float32\n )\n idict = {model.graph.input[0].name: inp_tensor}\n odict = oxe.execute_onnx(model, idict, True)\n produced = odict[model.graph.output[0].name]\n inp_tensor = torch.from_numpy(inp_tensor).float()\n expected = b_act.forward(inp_tensor).detach().numpy()\n assert np.isclose(produced, expected, atol=1e-3).all()\n" ]
[ [ "numpy.random.uniform", "torch.from_numpy", "numpy.isclose" ] ]
markmuetz/cosmic
[ "f215c499bfc8f1d717dea6aa78a58632a4e89113" ]
[ "ctrl/experimental/HadGEM3_highresSST_JJA_ipython.py" ]
[ "# coding: utf-8\nfrom pathlib import Path\ndatapath = Path('/badc/cmip6/data/CMIP6/HighResMIP/MOHC/HadGEM3-GC31-HM/highresSST-present/r1i1p1f1/E1hr/pr/gn/v20170831')\nfns = sorted(datapath.glob('*.nc'))\nfn = fns[-1]\n[[int(v) for v in (d[:4], d[4:6], d[6:8], d[8:10], d[10:])] for d in fn.stem[-25:].split('-')]\nd1, d2 = [[int(v) for v in (d[:4], d[4:6], d[6:8], d[8:10], d[10:])] for d in fn.stem[-25:].split('-')]\nd1\nd2\nimport pandas as pd\ndata = []\nfor fn in fns:\n d1, d2 = [[int(v) for v in (d[:4], d[4:6], d[6:8], d[8:10], d[10:])] for d in fn.stem[-25:].split('-')]\n data.append(d1 + d2 + [str(fn)])\n\ndf = pd.DataFrame(data, columns=['Y1', 'M1', 'D1', 'h1', 'm1', 'Y2', 'M2', 'D2', 'h2', 'm2', 'filename'])\ndf\ndf[(df.M1 == 4) & (df.M1 == 7)]\ndf[(df.M1 == 4) | (df.M1 == 7)]\ndf[(df.M1 == 4) | (df.M1 == 7)].filename\nfns_AMJJJS = df[(df.M1 == 4) | (df.M1 == 7)].filename.values\nfns_AMJJJS\nfn\ncube = iris.load_cube(str(fn))\nimport iris\ncube = iris.load_cube(str(fn))\ncube\ncube.extract\ncube.extract(iris.Constraint(time=lambda t: t < 1000))\ncube.extract(iris.Constraint(time=lambda t: t < 1000))\ncube\ncube.coord('time')\ndt1 = PartialDateTime(year=2007, month=7, day=15)\nfrom iris.time import PartialDateTime\ndt1 = PartialDateTime(year=2007, month=7, day=15)\nfn\ndt1 = PartialDateTime(year=2014, month=6, day=1)\ncube.extract(iris.Constraint(time=lambda t: t > dt1))\ncube.extract(iris.Constraint(time=lambda t: t == dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.time == dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.point.time == dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.point == dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.point < 1000))\ncube.extract(iris.Constraint(time=lambda cell: cell.point > dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell > dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.point > dt1))\ncube.extract(iris.Constraint(time=lambda cell: cell.point.value > dt1))\nfrom cftime._cftime import Datetime360Day\nd = Datetime360Day()\nd = Datetime360Day(2001, 1, 1)\nd\nd.second\nd.month\ncube.extract(iris.Constraint(time=lambda cell: cell.point.month == 6))\nfn\ncube.extract(iris.Constraint(time=lambda cell: cell.point.month == 12))\nfn = fns[-3]\ncube = iris.load_cube(str(fn))\ncube.extract(iris.Constraint(time=lambda cell: cell.point.month == 6))\ncube.extract(iris.Constraint(time=lambda cell: 4 < cell.point.month < 6))\ncube_AMJJAS = iris.load_cube([str(fn) for fn in fns_AMJJJS])\ncube_AMJJAS = iris.load_cube([str(fn) for fn in fns_AMJJJS[-10:]])\ncubes_AMJJAS = iris.load([str(fn) for fn in fns_AMJJJS[-4:]])\ncubes\ncubes_AMJJAS\ncubes_AMJJAS.concatenate_cube()\nfrom iris.experimental import equalise_cubes\nequalise_cubes(cubes_AMJJAS)\nfrom iris.experimental import equalise_cubes\nequalise_cubes.equalise_attributes(cubes_AMJJAS)\ncubes_AMJJAS.concatenate_cube()\ncube_AMJJAS = cubes_AMJJAS.concatenate_cube()\nconstraing_JJA + iris.Constraint(time=lambda cell: 6 <= cell.point.month <= 8)\nconstraint_JJA = iris.Constraint(time=lambda cell: 6 <= cell.point.month <= 8)\ncubes_AMJJAS.extract(constraint_JJA)\ncube_AMJJAS.extract(constraint_JJA)\ncube_JJA + cube_AMJJAS.extract(constraint_JJA)\ncube_JJA = cube_AMJJAS.extract(constraint_JJA)\n\n" ]
[ [ "pandas.DataFrame" ] ]
amitaifrey/learn-to-hash
[ "dc13fe715eb4ebfddf249d2a2c1c65cb3e4dbd78" ]
[ "kahip/kmkahip.py" ]
[ "\r\n'''\r\nPipeline to:\r\n-create knn graphs from dataset.\r\n-recursively partitions dataset using KaHIP in parallel.\r\n-learn tree of neural networks in tandem with partitions tree.\r\n'''\r\n\r\nimport _init_paths\r\nimport sys\r\nimport os\r\nimport os.path as osp\r\nimport pickle\r\nimport create_graph\r\nimport torch\r\nimport numpy as np\r\nimport argparse\r\nimport utils\r\nimport math\r\nfrom model import train\r\nfrom model.data import DataNode\r\nimport utils\r\nfrom collections import defaultdict\r\nimport multiprocessing as mp\r\nimport kmeans\r\nimport logreg\r\nimport pdb\r\n\r\ngraph_file = create_graph.graph_file\r\n\r\ndata_dir = utils.data_dir\r\nparts_path = osp.join(data_dir, 'partition')\r\ndsnode_path = osp.join(data_dir, 'ds_node')\r\n\r\n'''\r\nRerun kahip for every node/on every subtree.\r\nNeed new graph. \r\nInput:\r\n-branching_l, list of indices, branching in tree so far, used for memoizing partition results.\r\n'''\r\ndef run_kahip(graph_path, datalen, branching_l, height, opt):\r\n\r\n #num_parts = int(sys.argv[2])\r\n n_class = opt.n_class\r\n\r\n if n_class < 2:\r\n raise Exception('wrong number of parts: {}. Should be greater than or equal to 2.'.format(n_class))\r\n\r\n opt.kahip_config = 'eco'\r\n kahip_config = opt.kahip_config\r\n\r\n #if configuration != 'fast' and configuration != 'eco' and configuration != 'strong':\r\n if kahip_config not in ['fast', 'eco', 'strong']:\r\n raise Exception('configuration not supported')\r\n \r\n #output_path = ' '+str(n_class)\r\n \r\n if datalen < n_class:\r\n n_class = datalen\r\n \r\n branching_l_len = len(branching_l)\r\n #if True or branching_l_len == 1:\r\n\r\n\r\n #parts_path = opt.parts_path_root + str(n_class) + str(kahip_config) + '{}'.format(opt.dataset_name)+''.join(branching_l) + 'ht' + str(height) + '_{}'.format('50') #'sub10')#opt.k_graph)\r\n parts_path = opt.parts_path_root + str(n_class) + '{}'.format(opt.dataset_name)+''.join(branching_l) + 'ht' + str(height) + '_{}_{}'.format(opt.k_graph, opt.k)\r\n\r\n #else:\r\n # parts_path = opt.parts_path_root + str(n_class) + str(kahip_config)\r\n\r\n if opt.glove and (branching_l_len == 1):\r\n #if glove top level, use precomputed partition\r\n parts_path = utils.glove_top_parts_path(opt.n_clusters, opt)\r\n elif opt.glove_25 and (branching_l_len == 1):\r\n # if glove top level, use precomputed partition\r\n parts_path = utils.glove_25_top_parts_path(opt.n_clusters, opt)\r\n elif opt.glove_200 and (branching_l_len == 1):\r\n # if glove top level, use precomputed partition\r\n parts_path = utils.glove_200_top_parts_path(opt.n_clusters, opt)\r\n elif opt.sift and (branching_l_len == 1):\r\n #if glove top level, use precomputed partition\r\n parts_path = utils.sift_top_parts_path(opt.n_clusters, opt)\r\n elif opt.lastfm and (branching_l_len == 1):\r\n # if glove top level, use precomputed partition\r\n parts_path = utils.lastfm_top_parts_path(opt.n_clusters, opt)\r\n elif opt.deep and (branching_l_len == 1):\r\n # if glove top level, use precomputed partition\r\n parts_path = utils.deep_top_parts_path(opt.n_clusters, opt)\r\n elif opt.gist and (branching_l_len == 1):\r\n # if glove top level, use precomputed partition\r\n parts_path = utils.gist_top_parts_path(opt.n_clusters, opt)\r\n elif opt.prefix10m and (branching_l_len == 1):\r\n #if glove top level, use precomputed partition\r\n parts_path = utils.prefix10m_top_parts_path(opt.n_clusters, opt)\r\n\r\n if not os.path.exists(parts_path):# or branching_l_len > 1:\r\n print(\"partitioning! 
parts_path is {}\".format(parts_path))\r\n print(os.path.join(utils.kahip_dir, \"deploy\", \"kaffpa\") + ' ' + graph_path + \" --preconfiguration=\" + kahip_config + \" --output_filename=\" + \\\r\n parts_path + \" --k=\" + str(n_class) )#+ \" > /dev/null\" #+ \" --imbalance=\" + str(3)))\r\n #cmd = \"LD_LIBRARY_PATH=./KaHIP/extern/argtable-2.10/lib ./KaHIP/deploy/kaffpa \" + graph_file + \" --preconfiguration=\" + configuration + \" --output_filename=\" + output_file + \" --k=\" + str(num_parts) \r\n cmd = os.path.join(utils.kahip_dir, \"deploy\", \"kaffpa\") + ' ' + graph_path + \" --preconfiguration=\" + kahip_config + \" --output_filename=\" + \\\r\n parts_path + \" --k=\" + str(n_class) #+ \" > /dev/null\" #+ \" --imbalance=\" + str(3)\r\n #pdb.set_trace()\r\n if os.system(cmd) != 0:\r\n raise Exception('Kahip error')\r\n\r\n #raise exception here if just want partitioning of top level\r\n # print('parts path', parts_path)\r\n #raise Exception('done partitioning!', parts_path)\r\n\r\n return parts_path\r\n\r\n'''\r\ncreate data node by reading the graph partition file.\r\nInput:\r\n-path: path to kahip\r\n-ds_idx: LongTensor of indices in (entire) dataset\r\n-height: level of current node, root has highest height.\r\n-classes: classes in partitioning result. Int\r\nReturns:\r\n-datanode created from \r\n\r\nUse output from neural net\r\n'''#dataset, all_ranks, ds_idx, train_node, idx2bin, height-1, branching_l, classes, opt)\r\ndef add_datanode_children(dataset, all_ranks_data, ds_idx, parent_train_node, idx2bin, height, branching_l, classes, ht2cutsz, cur_tn_idx, opt, ds_idx_ranks, toplevel=None, root=False):\r\n \r\n all_ranks, idx2weights = all_ranks_data\r\n n_class = opt.n_class if opt.n_class <= len(ds_idx) else len(ds_idx)\r\n '''\r\n For 2nd level, SIFT, say 64 parts, beyond 25 epochs train does not improve much.\r\n '''\r\n if opt.glove or opt.sift or opt.lastfm or opt.glove_25 or opt.glove_200 or opt.deep or opt.gist:\r\n n_epochs = 18 if len(branching_l)==1 else 15 #44 opt.n_epochs #opt.n_epochs ################stopping mechanism 65. 18 if len(branching_l)==1 else 15\r\n # <-for MCCE loss #glove+sift: 18 then 15\r\n else:\r\n n_epochs = 18 if len(branching_l)==1 else 10 #opt.n_epochs #opt.n_epochs ################stopping mechanism 65.\r\n #85 good top level epoch number for MNIST. 
#glove+sift: 18 then 10\r\n \r\n toplevel = toplevel if toplevel is not None else (True if height > 0 else False)\r\n #need to train and get children idx (classes) from net.\r\n train_node = train.TrainNode(n_epochs, opt, height, toplevel=toplevel)\r\n #append node to parent\r\n parent_train_node.add_child(train_node, cur_tn_idx)\r\n \r\n dataset_data = dataset[ds_idx]\r\n if False and opt.sift:\r\n #'n' stands for neural and normalized\r\n dataset_n = dataset / dataset.norm(dim=1, p=2, keepdim=True).clamp(1)\r\n dataset_data_n = dataset_n[ds_idx]\r\n else:\r\n dataset_n = dataset\r\n dataset_data_n = dataset_data\r\n \r\n #height is 0 for leaf level nodes\r\n if False and height < 1:#not opt.compute_gt_nn: #height < 1: #not opt.compute_gt_nn: True or\r\n \r\n train_node.train(dataset, dsnode, idx2bin, height)\r\n model = train_node.model \r\n model.eval()\r\n classes_l = []\r\n chunk_sz = 90000\r\n \r\n dataset_len = len(dataset_data)\r\n for i in range(0, dataset_len, chunk_sz):\r\n \r\n end = min(i+chunk_sz, dataset_len) \r\n cur_data = dataset_data[i:end, :] \r\n classes_l.append(torch.argmax(model(cur_data), dim=1))\r\n\r\n classes = torch.cat(classes_l)\r\n\r\n action = opt.level2action[height]\r\n if action == 'km':\r\n #bottom level, use kmeans\r\n train_node.model = None\r\n train_node.trained = True\r\n train_node.idx2bin = idx2bin \r\n \r\n solver = kmeans.FastKMeans(dataset_data, n_class, opt)\r\n d_cls_idx = solver.predict(dataset_data, k=1)\r\n\r\n d_cls_idx = d_cls_idx.reshape(-1)\r\n \r\n classes = torch.LongTensor(d_cls_idx)\r\n \r\n train_node.kmsolver = solver\r\n \r\n d_idx_l = [np.where(d_cls_idx==i)[0] for i in range(n_class)]\r\n \r\n train_node.probe_count_l = [len(l) for l in d_idx_l] #[(classes == i).sum().item() for i in range(n_class) ] \r\n else:\r\n classes = torch.LongTensor(classes)\r\n \r\n if action == 'train':\r\n device = dataset.device\r\n '''\r\n #compute the ranks of top classes. 
Using centers of all points in a class\r\n \r\n sums = torch.zeros(n_class, dataset_data.size(-1), device=device)\r\n classes_exp = classes.unsqueeze(1).expand_as(dataset_data).to(device)\r\n sums.scatter_add_(0, classes_exp, dataset_data)\r\n \r\n lens = torch.zeros(n_class)#, dtype=torch.int64)\r\n lens_ones = torch.ones(dataset_data.size(0))# , dtype=torch.int64)\r\n lens.scatter_add_(0, classes, lens_ones)\r\n lens = lens.to(device)\r\n centers = sums / lens.unsqueeze(-1)\r\n \r\n ranks = utils.dist_rank(dataset_data, k=n_class, data_y=centers, include_self=True)\r\n '''\r\n\r\n dsnode = DataNode(ds_idx, classes, opt.n_class, ranks=ds_idx_ranks)\r\n \r\n #if opt.sift: \r\n #center as well?\r\n train_node.train(dataset_n, dsnode, idx2bin, height)\r\n #else:\r\n # train_node.train(dataset, dsnode, idx2bin, height)\r\n model = train_node.model \r\n model.eval()\r\n classes_l = []\r\n chunk_sz = 80000\r\n dataset_len = len(dataset_data_n)\r\n for i in range(0, dataset_len, chunk_sz):\r\n end = min(i+chunk_sz, dataset_len) \r\n cur_data = dataset_data_n[i:end, :] \r\n classes_l.append(torch.argmax(model(cur_data), dim=1))\r\n\r\n classes = torch.cat(classes_l)\r\n elif action == 'logreg':\r\n \r\n train_node.model = None\r\n train_node.trained = True\r\n train_node.idx2bin = idx2bin\r\n\r\n cur_path = None\r\n if opt.glove:\r\n cur_path = osp.join(utils.data_dir, 'lg_glove')\r\n elif opt.sift:\r\n cur_path = osp.join(utils.data_dir, 'lg_sift')\r\n \r\n if root and cur_path is not None: \r\n if osp.exists(cur_path):\r\n #deserialize\r\n with open(cur_path, 'rb') as file:\r\n solver = pickle.load(file)\r\n else:\r\n #serialize\r\n solver = logreg.LogReg(dataset_data, classes, opt)\r\n with open(cur_path, 'wb') as file:\r\n pickle.dump(solver, file)\r\n \r\n else:\r\n solver = logreg.LogReg(dataset_data, classes, opt)\r\n \r\n d_cls_idx = solver.predict(dataset_data, k=1)\r\n d_cls_idx = d_cls_idx.reshape(-1)\r\n\r\n classes = torch.LongTensor(d_cls_idx)\r\n train_node.kmsolver = solver\r\n \r\n \r\n d_idx_l = [np.where(d_cls_idx==i)[0] for i in range(n_class)]\r\n\r\n train_node.probe_count_l = [len(l) for l in d_idx_l] \r\n elif action == 'kahip':\r\n #kahip only\r\n train_node.model = None\r\n train_node.trained = True\r\n train_node.idx2bin = idx2bin \r\n train_node.idx2kahip = {}\r\n\r\n for i, cur_idx in enumerate(ds_idx):\r\n train_node.idx2kahip[cur_idx.item()] = classes[i]\r\n \r\n train_node.probe_count_l = [(classes == i).sum().item() for i in range(n_class) ]\r\n else:\r\n raise Exception('Action must be either kahip km or train')\r\n dsnode = DataNode(ds_idx, classes, opt.n_class)\r\n #ds_idx needs to be indices wrt entire dataset. \r\n #y are labels of clusters, indices 0 to num_cluster. \r\n \r\n if height > 0: \r\n #recurse based on children\r\n procs = []\r\n next_act = opt.level2action[height-1]\r\n parallelize = next_act in ['train', 'kahip', 'logreg'] \r\n if parallelize:\r\n p_man = mp.Manager()\r\n idx2classes = p_man.dict()\r\n \r\n branching_l_l = []\r\n child_ds_idx_l = []\r\n #index of child TrainNode\r\n tnode_idx_l = []\r\n ranks_l = []\r\n \r\n for cur_class in range(n_class):\r\n #pick the samples having this class \r\n child_ds_idx = ds_idx[classes==cur_class]\r\n child_branching_l = list(branching_l)\r\n child_branching_l.append(str(cur_class))\r\n\r\n if len(child_ds_idx) <= opt.k:\r\n #create train_node without model, but with base_idx, leaf_idx etc. 
Need to have placeholder for correct indexing.\r\n child_tn = train.TrainNode(opt.n_epochs, opt, height-1)\r\n child_tn.base_idx = len(set(idx2bin.values()))\r\n child_tn.leaf_idx = [child_tn.base_idx]\r\n for j in child_ds_idx:\r\n idx2bin[j.item()] = child_tn.base_idx\r\n \r\n child_tn.probe_count_l = [len(child_ds_idx)]\r\n child_tn.idx2bin = idx2bin\r\n train_node.add_child(child_tn, cur_class)\r\n else: \r\n ranks, all_ranks_data, graph_path = create_data_tree(dataset, all_ranks_data, child_ds_idx, train_node, idx2bin, height, child_branching_l, ht2cutsz, opt)\r\n branching_l_l.append(child_branching_l)\r\n \r\n #those knn graphs for kahip are one-based, and are lists and not tensors due to weights.\r\n if next_act == 'train':\r\n k1 = max(1, int(opt.nn_mult*opt.k))\r\n ranks_l.append(utils.dist_rank(dataset[child_ds_idx], k=k1, opt=opt))\r\n else:\r\n ranks_l.append([])\r\n if parallelize:\r\n datalen = len(child_ds_idx) \r\n p = mp.Process(target=process_child, args=(ranks, graph_path, datalen, child_branching_l, height, idx2classes, len(procs), ht2cutsz, opt))\r\n \r\n #print('processed child process!! len {}'.format(len(cur_classes))) \r\n procs.append(p)\r\n p.start()\r\n tnode_idx_l.append(cur_class)\r\n child_ds_idx_l.append(child_ds_idx)\r\n \r\n for p in procs:\r\n p.join()\r\n print('~~~~~~~~~~finished p.join. check classes_l')\r\n \r\n for i in range(len(branching_l_l )):\r\n if parallelize:\r\n classes = idx2classes[i]\r\n else:\r\n classes = None\r\n child_branching_l = branching_l_l[i]\r\n child_ds_idx = child_ds_idx_l[i]\r\n child_ranks = ranks_l[i]\r\n #create root DataNode dataset, ds_idx, parent_train_node, idx2bin, height, opt\r\n child_dsnode = add_datanode_children(dataset, all_ranks_data, child_ds_idx, train_node, idx2bin, height-1, child_branching_l, classes, ht2cutsz, tnode_idx_l[i], opt, child_ranks)\r\n dsnode.add_child(child_dsnode)\r\n else:\r\n \r\n train_node.base_idx = len(set(idx2bin.values()))\r\n train_node.leaf_idx = range(train_node.base_idx, train_node.base_idx+n_class)\r\n\r\n if train_node.kmsolver is not None:\r\n predicted = train_node.kmsolver.predict(dataset_data, k=1)\r\n for i, pred in enumerate(predicted):\r\n idx2bin[ds_idx[i].item()] = train_node.base_idx + int(pred)\r\n else:\r\n #predict entire dataset at once!\r\n if opt.compute_gt_nn or action == 'kahip':\r\n for i, data in enumerate(dataset_data): \r\n predicted = train_node.idx2kahip[ds_idx[i].item()].item()\r\n idx2bin[ds_idx[i].item()] = train_node.base_idx + predicted\r\n elif train_node.model is not None:\r\n dataset_data_len = len(dataset_data_n)\r\n\r\n chunk_sz = 80000\r\n if dataset_data_len > chunk_sz:\r\n pred_l = []\r\n \r\n for p in range(0, dataset_data_len, chunk_sz):\r\n cur_data = dataset_data_n[p : min(p+chunk_sz, dataset_data_len)] \r\n pred_l.append( torch.argmax(model(cur_data), dim=1) )\r\n\r\n predicted = torch.cat(pred_l)\r\n else:\r\n predicted = torch.argmax(model(dataset_data_n), dim=1)\r\n for i, pred in enumerate(predicted): \r\n #idx2bin[ds_idx[i].item()] = train_node.base_idx + train_node.leaf_idx[predicted]\r\n idx2bin[ds_idx[i].item()] = train_node.base_idx + int(pred)\r\n else:\r\n raise Exception('Training inconsistency')\r\n return dsnode\r\n\r\n'''\r\nTO be run in parallel.\r\nInput:\r\n-classes_l: NestedList object\r\n'''\r\ndef process_child(ranks, graph_path, datalen, branching_l, height, idx2classes, proc_i, ht2cutsz, opt):\r\n \r\n n_edges = create_graph.write_knn_graph(ranks, graph_path)\r\n \r\n parts_path = run_kahip(graph_path, datalen, 
branching_l, height, opt)\r\n print(parts_path)\r\n\r\n lines = utils.load_lines(parts_path)\r\n idx2classes[proc_i] = [int(line) for line in lines]\r\n\r\n '''\r\n compute_cut_size_b = False\r\n if compute_cut_size_b:\r\n cut_sz = compute_cut_size(classes, ranks)\r\n ht2cutsz[height].append((cut_sz, n_edges)) \r\n '''\r\n\r\n'''\r\ncreate data node tree by reading the graph partition file.\r\nTo be serialized and used by TrainNode to train.\r\nNote: ds_idx is 0-based, but ranks is 1-based\r\nds_idx are indices for current train node.\r\nReturns:\r\n-root node\r\n'''\r\ndef create_data_tree(dataset, all_ranks_data, ds_idx, train_node, idx2bin, height, branching_l, ht2cutsz, opt):\r\n\r\n (all_ranks, idx2weights) = all_ranks_data\r\n\r\n datalen = len(ds_idx)\r\n if datalen <= opt.k:\r\n return None\r\n\r\n #create graph from data.\r\n data = dataset[ds_idx]\r\n graph_path = os.path.join(opt.graph_file + '_' + str(opt.n_clusters) + '_' + ''.join(branching_l) + 'ht' + str(height))\r\n\r\n #ranks are 1-based\r\n if len(branching_l) == 1:\r\n #only use distance at top level of tree\r\n ranks = create_graph.create_knn_graph(data, k=opt.k, opt=opt) #should supply opt\r\n all_ranks = ranks\r\n else:\r\n assert all_ranks is not None \r\n #else compute part of previous graph\r\n ranks = create_graph.create_knn_sub_graph(all_ranks, idx2weights, ds_idx, data, opt)\r\n \r\n return ranks, all_ranks_data, graph_path\r\n\r\n'''\r\nTo be called for creating from root. Entry point to creating the tree.\r\n'''\r\ndef create_data_tree_root(dataset, all_ranks, ds_idx, train_node, idx2bin, height, branching_l, ht2cutsz, opt):\r\n\r\n datalen = len(ds_idx)\r\n if datalen <= opt.k:\r\n return None\r\n graph_path = opt.graph_file #'../data/knn.graph'\r\n print(\"graph path is: {}\".format(graph_path))\r\n \r\n #ranks are 1-based\r\n if opt.glove or opt.glove_25 or opt.glove_200 or opt.sift or opt.prefix10m or opt.lastfm or opt.deep or opt.gist: #and len(branching_l) == 1:\r\n # if opt.glove:\r\n # #custom paths\r\n # #if opt.glove and opt.k_graph==50: #april, 50NN graph file\r\n # #graph_path = os.path.join(opt.data_dir, 'glove50_'+opt.graph_file) #'../data/knn.graph'\r\n # graph_path = os.path.join(opt.data_dir, opt.graph_file) #'../data/knn.graph'\r\n # #graph_path = os.path.join(opt.data_dir, 'glove10_sub10knn.graph')\r\n # print('graph file {}'.format(graph_path))\r\n parts_path = run_kahip(graph_path, datalen, branching_l, height, opt)\r\n print('Done partitioning top level! 
path: {}'.format(parts_path))\r\n lines = utils.load_lines(parts_path)\r\n classes = [int(line) for line in lines]\r\n\r\n #read in all_ranks, for partitioning on further levels.\r\n all_ranks, idx2weights = read_all_ranks(opt)\r\n if opt.dataset_name != 'prefix10m':\r\n k1 = max(1, int(opt.nn_mult*opt.k))\r\n ranks = utils.dist_rank(dataset, k=k1, opt=opt)\r\n else:\r\n #subtract 1 as graph was created with 1-indexing for kahip.\r\n ranks = torch.load('/large/prefix10m10knn.graph.pt') - 1\r\n #create root DataNode dataset, ds_idx, parent_train_node, idx2bin, height, opt\r\n dsnode = add_datanode_children(dataset, (all_ranks, idx2weights), ds_idx, train_node, idx2bin, height-1, branching_l, classes, ht2cutsz, 0, opt, ranks, toplevel=True, root=True) \r\n return dsnode\r\n\r\n #create graph from data.\r\n data = dataset[ds_idx]\r\n if len(branching_l) == 1: #this is always the case now\r\n #use tree created at top level throughout the hierarchy\r\n ranks = create_graph.create_knn_graph(data, k=opt.k, opt=opt) #should supply opt\r\n all_ranks = ranks\r\n else:\r\n assert all_ranks is not None \r\n #else compute part of previous graph\r\n ranks = create_graph.create_knn_sub_graph(all_ranks, ds_idx, data, opt)\r\n \r\n n_edges = create_graph.write_knn_graph(ranks, graph_path)\r\n _, idx2weights = read_all_ranks(opt, path=graph_path)\r\n \r\n #create partition from graph\r\n #this overrides file each iteration \r\n parts_path = run_kahip(graph_path, datalen, branching_l, height, opt)\r\n\r\n lines = utils.load_lines(parts_path)\r\n classes = [int(line) for line in lines]\r\n \r\n compute_cut_size_b = False and not opt.glove\r\n if compute_cut_size_b:\r\n cut_sz = compute_cut_size(classes, ranks)\r\n ht2cutsz[height].append((cut_sz, n_edges)) \r\n \r\n #create root DataNode dataset, ds_idx, parent_train_node, idx2bin, height, opt\r\n dsnode = add_datanode_children(dataset, (all_ranks, idx2weights), ds_idx, train_node, idx2bin, height-1, branching_l, classes, ht2cutsz, 0, opt, all_ranks-1, toplevel=True, root=True)\r\n #Note the above all_ranks is not 5*opt.k number of nearest neighbors.\r\n \r\n return dsnode\r\n\r\n'''\r\nRead all ranks in from precomputed glove data.\r\nNote these neighbors are not ranked to distance, they are \r\nsorted according to index.\r\n'''\r\ndef read_all_ranks(opt, path=None):\r\n ranks = [] \r\n lines = utils.load_lines(opt.graph_file)[1:]\r\n #tuples of 2 indices, and their weights\r\n idx2weights = {}\r\n \r\n for i, line in enumerate(lines, 1):\r\n \r\n cur_list = line.strip().split(' ')\r\n cur_ranks = []\r\n \r\n for j in range(0, len(cur_list), 2):\r\n neigh = int(cur_list[j])\r\n cur_ranks.append(neigh)\r\n \r\n neigh_weight = int(cur_list[j+1])\r\n tup = (i, neigh) if i < neigh else (neigh, i)\r\n idx2weights[tup] = neigh_weight\r\n \r\n #ensure proper k! 
for resulting graph\r\n ranks.append(cur_ranks)\r\n \r\n #ranks = torch.LongTensor(ranks)\r\n return ranks, idx2weights\r\n\r\n'''\r\nRead all ranks in from precomputed SIFT data.\r\nNote these neighbors are not ranked to distance, they are \r\nsorted according to index.\r\n'''\r\ndef read_all_ranks_siftDep(opt):\r\n \r\n graph_path = osp.join(utils.data_dir, 'sift_graph_10', 'graph.txt')\r\n ranks = []\r\n \r\n lines = utils.load_lines(graph_path)[1:]\r\n #tuples of 2 indices, and their weights\r\n idx2weights = {}\r\n \r\n for i, line in enumerate(lines, 1):\r\n \r\n cur_list = line.strip().split(' ')\r\n cur_ranks = []\r\n \r\n for j in range(0, len(cur_list), 2):\r\n neigh = int(cur_list[j])\r\n cur_ranks.append(neigh)\r\n neigh_weight = int(cur_list[j+1])\r\n tup = (i, neigh) if i < neigh else (neigh, i)\r\n idx2weights[tup] = neigh_weight\r\n \r\n #ensure proper k! for resulting graph\r\n ranks.append(cur_ranks)\r\n \r\n return ranks, idx2weights\r\n\r\n'''\r\nInput:\r\n-classes: list of kahip output classes\r\n-ranks are 1-based\r\nShould pass in total number of edges to compute ratio!\r\n'''\r\ndef compute_cut_size(classes_l, ranks):\r\n idx2class = {}\r\n for i, iclass in enumerate(classes_l, 1):\r\n idx2class[i] = iclass\r\n #n_class = max(classes_l) #len(set(classes_l)) <--some classes are empty for high imbalance\r\n #cut_mx = torch.zeros(n_class, n_class)\r\n #should compute matrix of cuts\r\n cut = 0\r\n #ranks tensor\r\n ranks_is_tensor = isinstance(ranks, torch.Tensor)\r\n for i, row in enumerate(ranks, 1):\r\n \r\n for j in row:\r\n if ranks_is_tensor:\r\n j = j.item()\r\n iclass = idx2class[i]\r\n jclass = idx2class[j]\r\n if iclass != jclass:\r\n cut += 1\r\n #cut_mx[iclass-1, jclass-1] += 1\r\n #cut_mx[jclass-1, iclass-1] += 1\r\n \r\n print('total cut size {}'.format(cut))\r\n #print('cut matrix {}'.format(cut_mx))\r\n return cut\r\n \r\ndef run_kmkahip(height_preset, opt, dataset, queryset, neighbors):\r\n \r\n k = opt.k\r\n print('Configs: {} \\n Starting data processing and training ...'.format(opt))\r\n #this root node is a dummy node, since it doesn't have a trained model or idx2bin\r\n train_node = train.TrainNode(-1, opt, -1)\r\n\r\n swap_query_to_data = False\r\n if swap_query_to_data:\r\n print('** NOTE: Modifying queryset as part of dataset **') \r\n queryset = dataset[:11000]\r\n #queryset = dataset\r\n neighbors = utils.dist_rank(queryset, k=opt.k, data_y=dataset, largest=False, opt=opt)\r\n #dist += 2*torch.max(dist).item()*torch.eye(len(dist)) #torch.diag(torch.max(dist))\r\n #val, neighbors = torch.topk(dist, k=opt.k, dim=1, largest=False) \r\n \r\n #dsnode_path = opt.dsnode_path + str(opt.n_clusters)\r\n #dsnode = utils.pickle_load(dsnode_path)\r\n\r\n #check if need to normalize data. 
Remove second conditions eventually.\r\n if opt.normalize_data:# and dataset[0].norm(p=2).item() != 1:# and not opt.glove:\r\n print('Normalizing data ...')\r\n dataset = utils.normalize(dataset)\r\n queryset = utils.normalize(queryset)\r\n\r\n #create data tree used for training\r\n n_clusters = opt.n_clusters\r\n\r\n height = height_preset\r\n n_bins = 1\r\n \r\n ds_idx = torch.LongTensor(list(range(len(dataset))))\r\n print('{} height: {} level2action {}'.format(ds_idx.size(), height, opt.level2action))\r\n \r\n idx2bin = {}\r\n ht2cutsz = defaultdict(list) \r\n #used for memoizing partition results\r\n branching_l = ['0']\r\n all_ranks = None\r\n print(\"creating root\")\r\n root_dsnode = create_data_tree_root(dataset, all_ranks, ds_idx, train_node, idx2bin, height, branching_l,ht2cutsz, opt)\r\n print('Done creating training tree. Starting evaluation ...')\r\n\r\n #top node only first child node is train node.\r\n eval_root = train.EvalNode(train_node.children[0])\r\n\r\n ''' Evaluate '''\r\n \r\n with torch.no_grad():\r\n print('About to evaluate model! {} height: {} level2action {}'.format(ds_idx.size(), height, opt.level2action)) \r\n acc, probe_count, probe_count95 = train.eval_model(eval_root, queryset, neighbors, n_bins, opt)\r\n \r\n print('cut_sizes {}'.format(ht2cutsz))\r\n print('Configs: {}'.format(opt))\r\n print('acc {} probe count {} 95th {}'.format(acc, probe_count, probe_count95))\r\n\r\n ''' Serialize '''\r\n serialize_bool = False if 'kahip' in set(opt.level2action.values()) else True\r\n serialize_bool = True\r\n if serialize_bool:\r\n print('Serializing eval root...')\r\n if opt.sift:\r\n data_name = 'sift'\r\n elif opt.glove:\r\n data_name = 'glove'\r\n elif opt.glove_25:\r\n data_name = 'glove_25'\r\n elif opt.glove_200:\r\n data_name = 'glove_200'\r\n elif opt.prefix10m:\r\n data_name = 'prefix10m'\r\n elif opt.lastfm:\r\n data_name = 'lastfm'\r\n elif opt.deep:\r\n data_name = 'deep'\r\n elif opt.gist:\r\n data_name = 'gist'\r\n else:\r\n data_name = 'mnist'\r\n idx2bin = eval_root.idx2bin\r\n if 'logreg' in opt.level2action.values():\r\n serial_path = 'evalroot_{}_ht{}_{}_{}{}nn{}logreg'\r\n else:\r\n serial_path = 'evalroot_{}_ht{}_{}_{}{}nn{}'\r\n eval_root_path = osp.join(opt.data_dir, serial_path.format(data_name, height, n_clusters, opt.k_graph, opt.k, opt.nn_mult)) \r\n eval_root_dict = {'eval_root':eval_root, 'opt':opt}\r\n utils.pickle_dump(eval_root_dict, eval_root_path)\r\n print('Done serializing {}'.format(eval_root_path))\r\n #dsnode_path = opt.dsnode_path + str(opt.n_clusters)\r\n #utils.pickle_dump(root_dsnode, dsnode_path)\r\n \r\n with open(osp.join(opt.data_dir, 'cutsz_k{}_ht{}_{}'.format(k, height, n_clusters)), 'w') as file:\r\n file.write(str(ht2cutsz))\r\n file.write('\\n\\n')\r\n file.write(str(opt))\r\n \r\nif __name__ == '__main__':\r\n opt = utils.parse_args()\r\n\r\n opt.data_dir = \"data\"\r\n\r\n n_cluster_l = [2, 4, 16, 32, 64, 128, 256]\r\n n_cluster_l = [256]\r\n n_cluster_l = [8] #[64] #[2] #[16]\r\n \r\n # This is now set upstream, keep here for demo purposes.\r\n # actions can be km, kahip, train, logreg #\r\n opt.level2action = {0:'km', 1:'train'} \r\n opt.level2action = {0:'train', 1:'train'} \r\n \r\n opt.level2action = {0:'logreg', 2:'logreg', 3:'logreg', 4:'logreg', 5:'logreg', 6:'logreg', 7:'logreg', 8:'logreg', 9:'logreg', 10:'logreg', 11:'logreg'}\r\n opt.level2action = {0:'train', 1:'train'}\r\n \r\n height_l = range(1, 9)\r\n height_l = [1]\r\n\r\n #if opt.glove:\r\n if opt.subsample > 1:\r\n dataset = 
utils.load_glove_sub_data('train').to(utils.device)\r\n queryset = utils.load_glove_data('query').to(utils.device) \r\n neighbors = utils.load_glove_sub_data('answers').to(utils.device)\r\n opt.dataset_name = 'glove'\r\n elif opt.glove:\r\n dataset = utils.load_glove_data('train').to(utils.device)\r\n queryset = utils.load_glove_data('query').to(utils.device) \r\n neighbors = utils.load_glove_data('answers').to(utils.device)\r\n opt.dataset_name = 'glove'\r\n elif opt.glove_c:\r\n #catalyzer glove vecs\r\n dataset = utils.load_glove_c_data('train').to(utils.device)\r\n queryset = utils.load_glove_data('query').to(utils.device) \r\n neighbors = utils.load_glove_data('answers').to(utils.device)\r\n opt.dataset_name = 'glove'\r\n opt.glove = True\r\n elif opt.sift:\r\n dataset = utils.load_sift_data('train').to(utils.device)\r\n queryset = utils.load_sift_data('query').to(utils.device) \r\n neighbors = utils.load_sift_data('answers').to(utils.device)\r\n opt.dataset_name = 'sift'\r\n elif opt.prefix10m:\r\n dataset = utils.load_prefix10m_data('train').to(utils.device)\r\n queryset = utils.load_prefix10m_data('query').to(utils.device) \r\n neighbors = utils.load_prefix10m_data('answers').to(utils.device)\r\n opt.dataset_name = 'prefix10m' \r\n else:\r\n dataset = utils.load_data('train').to(utils.device)\r\n queryset = utils.load_data('query').to(utils.device) \r\n neighbors = utils.load_data('answers').to(utils.device)\r\n opt.dataset_name = 'mnist'\r\n for n_cluster in n_cluster_l:\r\n print('n_cluster {}'.format(n_cluster))\r\n opt.n_clusters = n_cluster\r\n opt.n_class = n_cluster\r\n for height in height_l:\r\n run_kmkahip(height, opt, dataset, queryset, neighbors)\r\n" ]
[ [ "torch.LongTensor", "torch.load", "torch.cat", "torch.no_grad", "numpy.where" ] ]
Rowing0914/Graph_Nets
[ "7f6ea41f6f7b96da783cf8387982d153dc7a408a" ]
[ "environments/multitask_env/walkers.py" ]
[ "#!/usr/bin/env python2\n# -----------------------------------------------------------------------------\n# @brief:\n# Several Walkers\n# @author:\n# Tingwu (Wilson) Wang, Nov. 22nd, 2017\n# -----------------------------------------------------------------------------\n\nimport numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nimport environments.init_path as init_path\nimport os\nimport num2words\n\n\ndef modify_xml(xml_name, num):\n if num is not None:\n if num <= 5:\n xml_name = xml_name.replace('.xml', num2words.num2words(num) + '.xml')\n # xml_name = 'mass/' + xml_name\n xml_name = 'strength/' + xml_name\n elif num <= 10:\n num -= 5\n xml_name = xml_name.replace('.xml', num2words.num2words(num) + '.xml')\n xml_name = 'strength/' + xml_name\n elif num <= 15:\n num -= 10\n xml_name = xml_name.replace('.xml', num2words.num2words(num) + '.xml')\n xml_name = 'length/' + xml_name\n else:\n raise NotImplementedError\n # print xml_name\n return xml_name\n\n\nclass WalkersHalfhumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, num=None):\n\n # get the path of the environments\n xml_name = 'WalkersHalfhumanoid.xml'\n xml_name = modify_xml(xml_name, num)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n xml_path = str(os.path.abspath(xml_path))\n self.num = num\n\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n posbefore = self.sim.data.qpos[0]\n self.do_simulation(a, self.frame_skip)\n posafter, height, ang = self.sim.data.qpos[0:3]\n alive_bonus = 1.0\n reward = ((posafter - posbefore) / self.dt)\n reward += alive_bonus\n reward -= 1e-3 * np.square(a).sum()\n done = not (height > 0.8 and height < 2.0 and\n ang > -1.0 and ang < 1.0)\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n qpos = self.sim.data.qpos\n qvel = self.sim.data.qvel\n return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()\n\n def reset_model(self):\n self.set_state(\n self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n )\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 2\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n self.viewer.cam.lookat[2] += .8\n self.viewer.cam.elevation = -20\n\n\nclass WalkersOstrichEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, num=None):\n\n # get the path of the environments\n xml_name = 'WalkersOstrich.xml'\n xml_name = modify_xml(xml_name, num)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n xml_path = str(os.path.abspath(xml_path))\n self.num = num\n\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n posbefore = self.sim.data.qpos[0]\n self.do_simulation(a, self.frame_skip)\n posafter, height, ang = self.sim.data.qpos[0:3]\n alive_bonus = 1.0\n reward = ((posafter - posbefore) / self.dt)\n reward += alive_bonus\n reward -= 1e-3 * np.square(a).sum()\n done = not (height > 0.8 and height < 2.0 and\n ang > -1.0 and ang < 1.0 and\n self.sim.data.site_xpos[0, 2] > 1.1)\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n qpos = self.sim.data.qpos\n qvel = self.sim.data.qvel\n return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()\n\n def reset_model(self):\n 
self.set_state(\n self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n )\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 2\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n self.viewer.cam.lookat[2] += .8\n self.viewer.cam.elevation = -20\n\n\nclass WalkersHopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, num=None):\n\n xml_name = 'WalkersHopper.xml'\n xml_name = modify_xml(xml_name, num)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n self.num = num\n\n xml_path = str(os.path.abspath(xml_path))\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n posbefore = self.sim.data.qpos[0]\n self.do_simulation(a, self.frame_skip)\n posafter, height, ang = self.sim.data.qpos[0:3]\n alive_bonus = 1.0\n reward = (posafter - posbefore) / self.dt\n reward += alive_bonus\n reward -= 1e-3 * np.square(a).sum()\n s = self.state_vector()\n done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and\n (height > .7) and (abs(ang) < .2))\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[1:],\n np.clip(self.sim.data.qvel.flat, -10, 10)\n ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 2\n self.viewer.cam.distance = self.model.stat.extent * 0.75\n self.viewer.cam.lookat[2] += .8\n self.viewer.cam.elevation = -20\n\n\nclass WalkersHalfcheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, num=None):\n\n xml_name = 'WalkersHalfcheetah.xml'\n xml_name = modify_xml(xml_name, num)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n xml_path = str(os.path.abspath(xml_path))\n self.num = num\n\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, action):\n xposbefore = self.sim.data.qpos[0]\n self.do_simulation(action, self.frame_skip)\n xposafter = self.sim.data.qpos[0]\n ob = self._get_obs()\n reward_ctrl = - 0.1 * np.square(action).sum()\n reward_run = (xposafter - xposbefore) / self.dt\n reward = reward_ctrl + reward_run\n alive_bonus = 1.0\n reward += alive_bonus\n s = self.state_vector()\n done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and\n self.sim.data.site_xpos[2, 2] > 1.2 and\n self.sim.data.site_xpos[0, 2] > 0.7 and\n self.sim.data.site_xpos[1, 2] > 0.7\n )\n\n return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[1:],\n np.clip(self.sim.data.qvel.flat, -10, 10)\n ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n\nclass WalkersFullcheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, num=None):\n\n xml_name = 
'WalkersFullcheetah.xml'\n xml_name = modify_xml(xml_name, num)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n xml_path = str(os.path.abspath(xml_path))\n self.num = num\n\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, action):\n xposbefore = self.sim.data.qpos[0]\n self.do_simulation(action, self.frame_skip)\n xposafter = self.sim.data.qpos[0]\n ob = self._get_obs()\n reward_ctrl = - 0.1 * np.square(action).sum()\n reward_run = (xposafter - xposbefore) / self.dt\n reward = reward_ctrl + reward_run\n alive_bonus = 1\n reward += alive_bonus\n s = self.state_vector()\n done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and\n self.sim.data.site_xpos[0, 2] > 0.7 and\n self.sim.data.site_xpos[1, 2] > 0.7\n )\n return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[1:],\n np.clip(self.sim.data.qvel.flat, -10, 10)\n ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n\nclass WalkersKangarooEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self):\n\n # get the path of the environments\n xml_name = 'WalkersKangaroo.xml'\n xml_name = modify_xml(xml_name, None)\n xml_path = os.path.join(os.path.join(init_path.get_base_dir(),\n 'environments', 'assets', xml_name))\n xml_path = str(os.path.abspath(xml_path))\n\n mujoco_env.MujocoEnv.__init__(self, xml_path, 4)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n posbefore = self.sim.data.qpos[0, 0]\n self.do_simulation(a, self.frame_skip)\n posafter, height, ang = self.sim.data.qpos[0:3, 0]\n alive_bonus = 1.0\n reward = ((posafter - posbefore) / self.dt) / 2.0\n reward += alive_bonus\n reward -= 1e-3 * np.square(a).sum()\n done = not (height > 0.8 and height < 2.0 and\n ang > -1.0 and ang < 1.0 and self.sim.data.site_xpos[0, 2] > 0.8\n and self.sim.data.site_xpos[0, 2] < 1.6)\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n qpos = self.sim.data.qpos\n qvel = self.sim.data.qvel\n return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()\n\n def reset_model(self):\n self.set_state(\n self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n )\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 2\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n self.viewer.cam.lookat[2] += .8\n self.viewer.cam.elevation = -20\n\n\n# robust env\nclass WalkersHalfhumanoidzeroEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidzeroEnv, self).__init__(num=0)\n\n\nclass WalkersOstrichzeroEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichzeroEnv, self).__init__(num=0)\n\n\nclass WalkersHopperzeroEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHopperzeroEnv, self).__init__(num=0)\n\n\nclass WalkersHalfcheetahzeroEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahzeroEnv, self).__init__(num=0)\n\n\nclass WalkersFullcheetahzeroEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n 
super(WalkersFullcheetahzeroEnv, self).__init__(num=0)\n\n\nclass WalkersHalfhumanoidfiveEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidfiveEnv, self).__init__(num=5)\n\n\nclass WalkersOstrichfiveEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichfiveEnv, self).__init__(num=5)\n\n\nclass WalkersHopperfiveEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHopperfiveEnv, self).__init__(num=5)\n\n\nclass WalkersHalfcheetahfiveEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahfiveEnv, self).__init__(num=5)\n\n\nclass WalkersFullcheetahfiveEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n super(WalkersFullcheetahfiveEnv, self).__init__(num=5)\n\n\nclass WalkersHalfhumanoidfourEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidfourEnv, self).__init__(num=4)\n\n\nclass WalkersOstrichfourEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichfourEnv, self).__init__(num=4)\n\n\nclass WalkersHopperfourEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHopperfourEnv, self).__init__(num=4)\n\n\nclass WalkersHalfcheetahfourEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahfourEnv, self).__init__(num=4)\n\n\nclass WalkersFullcheetahfourEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n super(WalkersFullcheetahfourEnv, self).__init__(num=4)\n\n\nclass WalkersHalfhumanoidthreeEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidthreeEnv, self).__init__(num=3)\n\n\nclass WalkersOstrichthreeEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichthreeEnv, self).__init__(num=3)\n\n\nclass WalkersHopperthreeEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHopperthreeEnv, self).__init__(num=3)\n\n\nclass WalkersHalfcheetahthreeEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahthreeEnv, self).__init__(num=3)\n\n\nclass WalkersFullcheetahthreeEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n super(WalkersFullcheetahthreeEnv, self).__init__(num=3)\n\n\nclass WalkersHalfhumanoidtwoEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidtwoEnv, self).__init__(num=2)\n\n\nclass WalkersOstrichtwoEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichtwoEnv, self).__init__(num=2)\n\n\nclass WalkersHoppertwoEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHoppertwoEnv, self).__init__(num=2)\n\n\nclass WalkersHalfcheetahtwoEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahtwoEnv, self).__init__(num=2)\n\n\nclass WalkersFullcheetahtwoEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n super(WalkersFullcheetahtwoEnv, self).__init__(num=2)\n\n\nclass WalkersHalfhumanoidoneEnv(WalkersHalfhumanoidEnv):\n\n def __init__(self):\n super(WalkersHalfhumanoidoneEnv, self).__init__(num=1)\n\n\nclass WalkersOstrichoneEnv(WalkersOstrichEnv):\n\n def __init__(self):\n super(WalkersOstrichoneEnv, self).__init__(num=1)\n\n\nclass WalkersHopperoneEnv(WalkersHopperEnv):\n\n def __init__(self):\n super(WalkersHopperoneEnv, self).__init__(num=1)\n\n\nclass WalkersHalfcheetahoneEnv(WalkersHalfcheetahEnv):\n\n def __init__(self):\n super(WalkersHalfcheetahoneEnv, self).__init__(num=1)\n\n\nclass WalkersFullcheetahoneEnv(WalkersFullcheetahEnv):\n\n def __init__(self):\n super(WalkersFullcheetahoneEnv, self).__init__(num=1)\n" ]
[ [ "numpy.square", "numpy.abs", "numpy.isfinite", "numpy.clip" ] ]
DavidZhangdw/pysot-toolkit
[ "8dd56e9ff8315104d4316542ca03b036a9e6b18e" ]
[ "bin/testTracker_got_vis_draw.py" ]
[ "# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\n\nimport cv2\nimport torch\nimport numpy as np\nimport math\nimport sys\n\nsys.path.append('../')\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(base_dir)\n\nfrom pysot.core.config import cfg\nfrom pysot.utils.bbox import get_axis_aligned_bbox\nfrom pysot.utils.model_load import load_pretrain\nfrom pysot.models.model_builder_gfl import ModelBuilder\nfrom toolkit.datasets import DatasetFactory\nfrom pysot.tracker.siamgfl_tracker import SiamGATTracker\n\n\nparser = argparse.ArgumentParser(description='siamgat tracking')\nparser.add_argument('--video', default='', type=str,\n help='eval one special video')\nparser.add_argument('--dataset', type=str, default='GOT-10k',\n help='datasets') # OTB100 LaSOT UAV123 GOT-10k\nparser.add_argument('--vis', action='store_true', default=True,\n help='whether visualzie result')\nparser.add_argument('--snapshot', type=str, default='snapshot/got10k_model.pth',\n help='snapshot of models to eval')\nparser.add_argument('--config', type=str, default='../experiments/siamgat_googlenet_got10k/config.yaml',\n help='config file')\nargs = parser.parse_args()\n\ntorch.set_num_threads(1)\n\n\ndef main():\n # load config\n cfg.merge_from_file(args.config)\n\n # Test dataset\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n dataset_root = os.path.join(cur_dir, '/media/david/000AA762000315F3/Datesets/GOT_10k/val')\n #//home/david/Desktop/pysot-master/testing_dataset\n\n # set hyper parameters\n params = getattr(cfg.HP_SEARCH, 'GOT10k')\n cfg.TRACK.LR = params[0]\n cfg.TRACK.PENALTY_K = params[1]\n cfg.TRACK.WINDOW_INFLUENCE = params[2]\n\n model = ModelBuilder()\n\n # load model\n model = load_pretrain(model, args.snapshot).cuda().eval()\n\n # build tracker\n tracker = SiamGATTracker(model)\n\n # create dataset\n dataset = DatasetFactory.create_dataset(name=args.dataset,\n dataset_root=dataset_root,\n load_img=False)\n\n model_name = args.snapshot.split('/')[-1].split('.')[-2]\n\n box_dir = \"results/others\"\n\n # OPE tracking\n for v_idx, video in enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n toc = 0\n pred_bboxes = []\n track_times = []\n\n name = video.name\n record_file = os.path.join(box_dir,'Ours_VAL',name,'%s_001.txt'%video.name)\n pact_box = np.loadtxt(record_file, delimiter=',')\n\n record_file1 = os.path.join(box_dir,'SiamGAT_val',name,'%s_001.txt'%video.name)\n pact_box1 = np.loadtxt(record_file1, delimiter=',')\n\n record_file2 = os.path.join(box_dir,'siamfcpp_val',name,'%s_001.txt'%video.name)\n pact_box2 = np.loadtxt(record_file2, delimiter=',')\n\n for idx, (img, gt_bbox) in enumerate(video):\n tic = cv2.getTickCount()\n\n if idx == 0:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n pred_bboxes.append(pred_bbox)\n else:\n outputs = tracker.track(img)\n pred_bbox = outputs['bbox']\n #print(outputs['ltrb'])\n pred_bboxes.append(pred_bbox)\n toc += cv2.getTickCount() - tic\n track_times.append((cv2.getTickCount() - tic)/cv2.getTickFrequency())\n if idx == 0:\n cv2.destroyAllWindows()\n if args.vis and idx > 0:\n if not any(map(math.isnan,gt_bbox)):\n x1, y1, w1, h1 = pact_box[idx]\n x11, y11, w11, h11 = 
pact_box1[idx]\n                    x12, y12, w12, h12 = pact_box2[idx]\n                    gt_bbox = list(map(int, gt_bbox))\n                    pred_bbox = list(map(int, pred_bbox))\n                    '''out = cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),\n                          (gt_bbox[0]+gt_bbox[2], gt_bbox[1]+gt_bbox[3]), (0, 255, 0), 3)''' # green\n                    out = cv2.rectangle(img, (int(x1), int(y1)),\n                                  (int(x1 + w1), int(y1 + h1)), (255, 0, 0), 3) # blue (BGR)\n                    out = cv2.rectangle(out, (int(x11), int(y11)),\n                                  (int(x11 + w11), int(y11 + h11)), (0, 255, 0), 3) # green\n                    out = cv2.rectangle(out, (int(x12), int(y12)),\n                                  (int(x12 + w12), int(y12 + h12)), (0, 0, 255), 3) # red (BGR)\n                    #out = cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),\n                                  #(pred_bbox[0]+pred_bbox[2], pred_bbox[1]+pred_bbox[3]), (0, 255, 255), 3) # yellow\n                    out = cv2.putText(out, str(idx), (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 255), 3)\n                    cv2.imshow(video.name, out)\n                    cv2.waitKey(1)\n                    out_path = os.path.join('output_box', video.name)\n                    if not os.path.isdir(out_path):\n                        os.makedirs(out_path)\n                    cv2.imwrite(\"output_box/{seq_name}/{seq_name}_{id}.jpg\".format(seq_name=video.name, id=idx), out)\n        toc /= cv2.getTickFrequency()\n\n        # save results\n        if 'GOT-10k' == args.dataset:\n            video_path = os.path.join('results', 'GOT-10K-Val', model_name, video.name)\n            if not os.path.isdir(video_path):\n                os.makedirs(video_path)\n            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))\n            with open(result_path, 'w') as f:\n                for x in pred_bboxes:\n                    f.write(','.join([str(i) for i in x]) + '\\n')\n            result_path = os.path.join(video_path,\n                    '{}_time.txt'.format(video.name))\n            with open(result_path, 'w') as f:\n                for x in track_times:\n                    f.write(\"{:.6f}\\n\".format(x))\n        else:\n            model_path = os.path.join('results', args.dataset, model_name)\n            if not os.path.isdir(model_path):\n                os.makedirs(model_path)\n            result_path = os.path.join(model_path, '{}.txt'.format(video.name))\n            with open(result_path, 'w') as f:\n                for x in pred_bboxes:\n                    f.write(','.join([str(i) for i in x])+'\\n')\n        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(\n            v_idx+1, video.name, toc, idx / toc))\n\n\nif __name__ == '__main__':\n    main()\n" ]
[ [ "numpy.array", "torch.set_num_threads", "numpy.loadtxt" ] ]
Rogue05/Praktyki-FT
[ "e647b64b15b742a74903fa0749cf8143fb7ad45d", "e647b64b15b742a74903fa0749cf8143fb7ad45d" ]
[ "untitled1.py", "mymod.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 22:24:15 2021\n\n@author: Wojtek\n\"\"\"\nimport os\nimport warnings\n\nfrom matplotlib.cbook.deprecation import MatplotlibDeprecationWarning\nwarnings.filterwarnings(\"ignore\", category=MatplotlibDeprecationWarning)\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import curve_fit\n\ndirpath = 'Lifetime analysis/dane/'\n\n# # filename = '2Time.dat'\n# for filename in ['1Time.dat','2Time.dat']:\n# data = pd.read_csv(dirpath+filename,sep='\\t',skiprows=[1,])\n# t, i = np.array(data['Time']),np.array(data['Intensity'])\n# lim = 0.1\n# i = i/i.sum()\n# t,i = t[t<0.01],i[t<0.01]\n\n# plt.plot(t, i, label=filename)\n# plt.legend()\n# plt.show()\n\n# # #### SEF\n# # fun = lambda x, A, B, C, D: A*np.exp(-B*(x-0.08)**C) + D\n# fun = lambda x, A, B, C, D: A*np.exp(-B*(x-0.08)**C) + D\n# p0 = [3829.04734517, 9016.84220052, 1, 46.15589228]\n\n#### EXP & 2EXP\n# fun = lambda x, A, T, B: A*np.exp(-T*(x-0.08)) + B\n# fun = lambda x, A1, T1, A2, T2: A1*np.exp(-T1*(x-0.08)) + A2*np.exp(-T2*(x-0.08))\n# fun = lambda x, A1, T1, A2, T2, B: A1*np.exp(-T1*(x-0.08)) + A2*np.exp(-T2*(x-0.08)) + B\nfun = lambda x, A1, T1, A2, T2, A3, T3, B: \\\n A1*np.exp(-T1*(x-0.08)) + \\\n A2*np.exp(-T2*(x-0.08)) + \\\n A3*np.exp(-T3*(x-0.08)) + B\n\n# fun = lambda x, A, T: A*np.exp(np.exp(-T*(x-0.08)))\n# fun = lambda x, A, T, B: A*np.exp(-T*(x-0.08))+B\n# p0=(527.94641168, 5083.02901712)\np0=None\n\n# fun_rise = lambda x, T0, T, A, B: A*(1-np.exp(-(x-T0)/T))+B\n\n# def fun_rise(x, T0, T, A, B):\n# ret = A*(1-np.exp(-(x-T0)/T))*np.heaviside(x-T0,1)\n# # ret[ret<0]=0\n# # ret += B\n# return ret\n\n# def fun_rise(x, T01, T1, A, T02, T2, B):\n# if T01 == T02:\n# T01 += T02\n# T02 = 0\n# ret = A*(1-T01/(T01-T02)*np.exp(-x/T1)+T02/(T01-T02)*np.exp(-x/T2))/2\n# ret[ret<0]=0\n# ret += B\n# return ret\n\ndef fun_rise(x, T1, T2, A, T0):\n if T1 == T2:\n T1 += T2\n T2 = 0\n x=x-T0\n ret = A*(1-T1/(T1-T2)*np.exp(-x/T1)+T2/(T1-T2)*np.exp(-x/T2))\\\n *np.heaviside(x,1)\n # ret[ret<0]=0\n # ret += B\n return ret\np0_rise=None\n# p0_rise=(0.001,0.003,350,15) # 1exp\n# p0_rise=(0.001,0.001,3900,0.001,0.003,15) # 2exp\np0_rise=(.003,.0032,3900,.001) #iner+hev\ndirpath = dirpath + 'dane Marcina/'\n\nfits = []\n\nplt.figure(figsize=(10,5))\nfilenames = os.listdir(dirpath)\nfilenames.sort(key = lambda x:-int(x.split(' ')[-1].split('.')[0]))\n\ninds = []\n\n\nTs = []\nAs = []\nfor filename in filenames:\n# for filename in filenames[5:7]:\n print(filename)\n ind = int(filename.split(' ')[-1].split('.')[0])\n inds.append(ind)\n data = pd.read_csv(dirpath+filename,sep='\\t',header=None)\n t0, i0 = np.array(data[0]),np.array(data[1])\n # lim = 0.0808\n # t,i = t[t<lim],i[t<lim]\n # lim = 0.08\n # t,i = t[t>lim],i[t>lim]\n\n\n lim = 0.02\n # lim = 0.005\n t,i = t0[t0<lim],i0[t0<lim]\n\n # plt.plot(t,i)\n # if i.max()<1e2: continue\n\n # plt.figure(figsize=(10,5))\n plt.subplot(121)\n # plt.semilogy(t,i,label=str(ind))\n plt.plot(t,i,label=str(ind))\n\n popt, pcov = curve_fit(fun_rise,t,i,p0=p0_rise)\n print('--',popt,)\n plt.plot(t,fun_rise(t,*popt))\n Ts.append(popt[[0,1,3]])\n As.append(popt[2])\n # plt.plot(t,fun_rise(t,*p0_rise))\n # plt.plot(t,fun_rise(t,0.001,0.003,350,15))\n\n # break\n\n lim = 0.08\n t,i = t0[t0>lim],i0[t0>lim]\n # lim = 0.0808\n lim = 0.082\n t,i = t[t<lim],i[t<lim]\n plt.subplot(122)\n # plt.plot(t,i,label=str(filename.split(' ')[-1].split('.')[0]))\n plt.semilogy(t,i,label=str(filename.split(' ')[-1].split('.')[0]))\n 
# plt.loglog(t,i,label=str(filename.split(' ')[-1].split('.')[0]))\n\n try:\n popt, pcov = curve_fit(fun,t,i,p0=p0)\n except:\n break\n p0=popt\n fits.append(popt)\n # A, T, B = popt\n # plt.plot(t,fun(t,*popt))\n plt.semilogy(t,fun(t,*popt))\n # plt.loglog(t,fun(t,*popt))\n print(popt,np.sqrt(np.diag(pcov)))\n print('+/-',3*np.around(np.sqrt(np.diag(pcov))/popt*100,1),'%')\n # break\n\n# plt.figure()\n# plt.plot(t,i-fun(t,*popt),'.')\n\nplt.subplot(121);plt.legend()\nplt.subplot(122);plt.legend()\n\n# fits = np.array(fits)\n# fits[np.abs(fits)==np.inf] = np.nan\n# plt.figure()\n# plt.plot(inds,fits[:,1],'.')\n# plt.plot(inds,fits[:,3],'.')\n\n# plt.figure()\n# plt.plot(inds,fits[:,0],'.')\n# plt.plot(inds,fits[:,2],'.')\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 5 21:18:51 2021\n\n@author: Wojtek\n\"\"\"\nimport os\nimport re\nimport warnings\n\nfrom matplotlib.cbook.deprecation import MatplotlibDeprecationWarning\nwarnings.filterwarnings(\"ignore\", category=MatplotlibDeprecationWarning)\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import curve_fit\n\ndef process_name(filename):\n # date = filename[:10]\n date, sample_nr, power = re.split('Concentration|Power|Sample',filename[:-4])\n return date, sample_nr, power\n\nclass Exp:\n fun = lambda t, A, T, B: A*np.exp(-t/T)+B\n\n def get_p0(t,y):\n B = y.min()\n A = y.max()-y.min()\n T = (t.max()-t.min())/3\n return A, T, B\n\nclass Exp_rise:\n fun = lambda t, A, T, B: A*(1-np.exp(-t/T))+B\n\n def get_p0(t,y):\n B = y.min()\n A = y.max()-y.min()\n T = (t.max()-t.min())/3\n return A, T, B\n\nclass Exp2:\n fun = lambda t, A1, T1, A2, T2, B: A1*np.exp(-t/T1)+ A2*np.exp(-t/T2)+B\n\n def get_p0(t,y):\n B = y.min()\n A1 = (y.max()-y.min())/2\n A2 = A1\n T1 = (t.max()-t.min())/3\n T2 = T1*2\n return A1, T1, A2, T2, B\n\nclass Exp2_rise:\n fun = lambda t, A1, T1, A2, T2, B: A1*(1-np.exp(-t/T1))+ A2*(1-np.exp(-t/T2))+B\n\n def get_p0(t,y):\n B = y.min()\n A1 = (y.max()-y.min())/2\n A2 = A1\n T1 = (t.max()-t.min())/3\n T2 = T1*2\n return A1, T1, A2, T2, B\n\nclass ExpS:\n fun = lambda t, A, B, C, D: A*np.exp(-t**C/B) + D\n\n def get_p0(t,y):\n D = y.min()\n A = y.max()-y.min()\n C = 1\n B = (t.max()-t.min())/10\n return A, B, C, D\n\nclass ExpS_rise:\n fun = lambda t, A, B, C, D: A*(1-np.exp(-t**C/B)) + D\n\n def get_p0(t,y):\n D = y.min()\n A = y.max()-y.min()\n C = 1\n B = (t.max()-t.min())/10\n return A, B, C, D\n\ndef process_data(t,y,t_start,t_end,model):\n di = np.logical_and(t>t_start,t<t_end)\n dt, dy = t[di], y[di]\n plt.plot(dt,dy,label='decay')\n # plt.legend()\n\n p0 = model.get_p0(dt, dy)\n plt.plot(dt,model.fun(dt-t_start,*p0))\n\n\n popt, pcov = curve_fit(model.fun,dt-t_start,dy,p0)\n stderr = np.sqrt(np.diag(pcov))\n r = dy - model.fun(dt-t_start,*popt)\n plt.plot(dt,model.fun(dt-t_start,*popt))\n plt.show()\n\n return popt, stderr, r\n\n\ndef process_file(filename, t_start, t_end, model):\n try:\n data = pd.read_csv(filename,\n sep=';',\n decimal=',',\n header=None,\n skiprows=15)\n except pd.errors.EmptyDataError:\n print('ERROR invalid file',filename)\n # continue\n return\n except FileNotFoundError:\n print('ERROR missing file',filename)\n # continue\n return\n\n t,y = np.array(data[0]),np.array(data[1])\n # return process_data(t,y,0.025,0.030,Exp2)\n # return process_data(t,y,0.019,0.025,ExpS_rise)\n return process_data(t, y, t_start, t_end, model)\n\n\nif __name__ == '__main__':\n dirpath = 'respotkanieponiedziaek'\n\n for filename in os.listdir(dirpath):\n 
print(filename, process_name(filename))\n        date, sample_nr, power = process_name(filename)\n        ret = process_file(os.path.join(dirpath, filename), 0.019, 0.025, ExpS_rise)\n        if ret is None: continue\n        popt, stderr, r = ret\n        print(stderr)\n        if filename == '28.06.2021Sample208PowerHigh.txt':\n            break\n" ]
[ [ "numpy.diag", "matplotlib.pyplot.legend", "pandas.read_csv", "numpy.heaviside", "matplotlib.pyplot.subplot", "numpy.exp", "numpy.array", "scipy.optimize.curve_fit", "matplotlib.pyplot.figure" ], [ "numpy.diag", "pandas.read_csv", "matplotlib.pyplot.plot", "numpy.exp", "numpy.array", "numpy.logical_and", "scipy.optimize.curve_fit", "matplotlib.pyplot.show" ] ]
IQTLabs/WITW
[ "36154fb9388dbdc5b2776fc9d49699b26a08f8ae" ]
[ "tools/heatmap/heatmap.py" ]
[ "#!/usr/bin/env python\n\nimport os\nimport sys\nimport tqdm\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom skimage import io\nimport torch\nimport torchvision\nfrom osgeo import osr\nfrom osgeo import gdal\n\nsys.path.append('../../model')\nimport cvig_fov as cvig\nGlobals = cvig.Globals\ndevice = cvig.device\n\nnames = [\n '01_rio',\n '02_vegas',\n '03_paris',\n '04_shanghai',\n '05_khartoum',\n '06_atlanta',\n '07_moscow',\n '08_mumbai',\n '09_san',\n '10_dar',\n '11_rotterdam',\n]\n\n\nclass ImageDataset(torch.utils.data.Dataset):\n def __init__(self, paths, transform=None):\n self.paths = paths\n self.transform = transform\n def __len__(self):\n return len(self.paths)\n def __getitem__(self, idx):\n raw = io.imread(self.paths[idx])\n image = torch.from_numpy(raw.astype(np.float32).transpose((2, 0, 1)))\n data = {'image':image}\n if self.transform is not None:\n data = self.transform(data)\n return data\n\n\nclass TileDataset(torch.utils.data.Dataset):\n def __init__(self, source, windows, transform=None):\n self.source = source\n self.windows = windows\n self.transform = transform\n def __len__(self):\n return len(self.windows)\n def __getitem__(self, idx):\n mem_path = '/vsimem/tile%s.jpg' % str(idx)\n ds = gdal.Translate(mem_path, self.source, projWin=self.windows[idx])\n raw = ds.ReadAsArray()\n gdal.GetDriverByName('GTiff').Delete(mem_path)\n image = torch.from_numpy(raw.astype(np.float32))\n data = {'image':image}\n if self.transform is not None:\n data = self.transform(data)\n return data\n\n\nclass ResizeSurface(object):\n \"\"\"\n Resize surface photo to fit model and crop to fov.\n \"\"\"\n def __init__(self, fov=360):\n self.fov = fov\n self.surface_width = int(self.fov / 360 * Globals.surface_width_max)\n def __call__(self, data):\n data['image'] = torchvision.transforms.functional.resize(data['image'], (Globals.surface_height_max, self.surface_width))\n return data\n\n\nclass ResizeOverhead(object):\n \"\"\"\n Resize overhead image tile to fit model and crop to fov.\n \"\"\"\n def __call__(self, data):\n data['image'] = torchvision.transforms.functional.resize(data['image'], (Globals.overhead_size, Globals.overhead_size))\n return data\n\n\nclass ImageNormalization(object):\n \"\"\"\n Normalize image values to use with pretrained VGG model\n \"\"\"\n def __init__(self):\n self.norm = torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n def __call__(self, data):\n data['image'] = self.norm(data['image'] / 255.)\n return data\n\n\nclass PolarTransform(object):\n def __init__(self):\n self.transform = cvig.PolarTransform()\n def __call__(self, data):\n data_renamed = {'overhead':data['image']}\n data = self.transform(data_renamed)\n return data\n\n\ndef sweep(aoi, bounds, edge, offset, fov, sat_dir, photo_path, csv_path):\n\n # Compute center and window for each satellite tile\n center_eastings = []\n center_northings = []\n windows = []\n e2 = edge / 2.\n for easting in np.arange(bounds[0] - e2, bounds[2] - e2, offset):\n for northing in np.arange(bounds[3] + e2, bounds[1] + e2, -offset):\n center_eastings.append(easting + e2)\n center_northings.append(northing - e2)\n windows.append([easting, northing, easting + edge, northing - edge])\n\n # Load satellite strip\n sat_path = os.path.join(sat_dir, names[aoi-1] + '.tif')\n sat_file = gdal.Open(sat_path)\n\n # Specify transformations\n surface_transform = torchvision.transforms.Compose([\n ResizeSurface(fov),\n ImageNormalization()\n ])\n overhead_transform = 
torchvision.transforms.Compose([\n ResizeOverhead(),\n ImageNormalization(),\n PolarTransform()\n ])\n\n # Load data\n surface_set = ImageDataset((photo_path,), surface_transform)\n overhead_set = TileDataset(sat_file, windows, overhead_transform)\n surface_batch = torch.unsqueeze(surface_set[0]['image'], dim=0).to(device)\n overhead_loader = torch.utils.data.DataLoader(overhead_set, batch_size=64, shuffle=False, num_workers=1)\n\n # Load the neural networks\n surface_encoder = cvig.FOV_DSM(circ_padding=False).to(device)\n overhead_encoder = cvig.FOV_DSM(circ_padding=True).to(device)\n surface_encoder.load_state_dict(torch.load('../../model/fov_{}_surface_best.pth'.format(int(fov))))\n overhead_encoder.load_state_dict(torch.load('../../model/fov_{}_overhead_best.pth'.format(int(fov))))\n surface_encoder.eval()\n overhead_encoder.eval()\n\n # Surface photo's features\n surface_embed = surface_encoder(surface_batch)\n\n # Overhead images' features\n torch.set_grad_enabled(False)\n overhead_embed = None\n for batch, data in enumerate(tqdm.tqdm(overhead_loader)):\n overhead = data['polar'].to(device)\n #with torch.set_grad_enabled(False):\n overhead_embed_part = overhead_encoder(overhead)\n if overhead_embed is None:\n overhead_embed = overhead_embed_part\n else:\n overhead_embed = torch.cat((overhead_embed, overhead_embed_part), dim=0)\n\n # Calculate score for each overhead image\n output_width_max = 64\n orientation_estimate = cvig.correlation(overhead_embed, surface_embed)\n orientations = torch.squeeze(orientation_estimate) * 360 / output_width_max - 180\n overhead_cropped_all = cvig.crop_overhead(overhead_embed, orientation_estimate, surface_embed.shape[3])\n distances = cvig.l2_distance(overhead_cropped_all, surface_embed)\n distances = torch.squeeze(distances)\n scores = torch.exp(10. * (1. 
- distances))\n\n # Save information to disk\n df = pd.DataFrame({\n 'x': center_eastings,\n 'y': center_northings,\n 'orientation': orientations.cpu().numpy(),\n 'dissimilarity': distances.cpu().numpy(),\n 'score': scores.cpu().numpy()\n })\n df.to_csv(csv_path, index=False)\n\n\ndef layer(aoi, bounds, sat_dir, layer_path):\n sat_path = os.path.join(sat_dir, names[aoi-1] + '.tif')\n sat_file = gdal.Open(sat_path)\n window = [bounds[0], bounds[3], bounds[2], bounds[1]]\n gdal.Translate(layer_path, sat_file, projWin=window)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--aoi',\n type=int,\n choices=range(1,12),\n default=3,\n help='SpaceNet AOI of satellite image')\n parser.add_argument('-b', '--bounds',\n type=float,\n nargs=4,\n default=(447665.8, 5411329.8, 448184.8, 5411814.8),\n metavar=('left', 'bottom', 'right', 'top'),\n help='Bounds given as UTM coordinates in this order: min easting, min northing, max easting, max northing')\n parser.add_argument('-e', '--edge',\n type=float,\n default=225,\n help='Edge length of satellite imagery tiles [m]')\n parser.add_argument('-o', '--offset',\n type=float,\n default=56.25,\n help='Offset between centers of adjacent satellite imagery tiles [m]')\n parser.add_argument('-f', '--fov',\n type=int,\n default=70,\n help='Field of view assumed for photo (deg, rounded)')\n parser.add_argument('-s', '--satdir',\n default='/local_data/geoloc/sat/utm',\n help='Folder containing satellite images')\n parser.add_argument('-p', '--photopath',\n default='img.jpg',\n help='Path to surface photo to analyze')\n parser.add_argument('-c', '--csvpath',\n default='./geomatch.csv',\n help='Path to output CSV file path')\n parser.add_argument('-l', '--layerpath',\n default='./satlayer.tiff',\n help='Path to output cropped satellite image')\n parser.add_argument('-i', '--image',\n action='store_true',\n help='Flag to output cropped satellite image')\n args = parser.parse_args()\n sweep(args.aoi, args.bounds, args.edge, args.offset, args.fov,\n args.satdir, args.photopath, args.csvpath)\n if args.image:\n layer(args.aoi, args.bounds, args.satdir, args.layerpath)\n" ]
[ [ "torch.cat", "numpy.arange", "torch.utils.data.DataLoader", "torch.unsqueeze", "torch.exp", "torch.set_grad_enabled", "torch.squeeze" ] ]
rocabrera/audio-learning
[ "9c99effb44c05cb33a7fdc8dbce18955fa95f84e" ]
[ "wav2vec/project/data_processor/dataset.py" ]
[ "import re\nimport os\nimport json\nimport pandas as pd\nfrom glob import glob\n\nfrom abc import ABC, abstractmethod\n\nclass DataBase(ABC):\n \n @abstractmethod\n def make_tidy(self):\n pass\n \n @abstractmethod\n def parse_data(self) -> pd.DataFrame:\n pass\n \nclass MLS(DataBase):\n \n ext = \".flac\" \n basename = \"multi_speech_librespeech\"\n \n def __init__(self, data_train_dir, data_test_dir, data_dev_dir):\n\n self.train_path = data_train_dir\n self.test_path = data_test_dir\n self.dev_path = data_dev_dir\n \n def _create_path(self, path_type:str, audio_code:str):\n \n match = re.search(\"(\\d+)_(\\d+)_(\\d+)\",audio_code)\n return os.path.join(path_type, \"audio\", match.group(1), match.group(2), \"\".join([audio_code, self.ext]))\n\n def _parse_type(self, path_type:str, type_:str) -> pd.DataFrame:\n path_label = os.path.join(path_type, \"transcripts.txt\")\n \n df = pd.read_csv(path_label, sep=\"\\t\",header=None,names=[\"audio_code\", \"text\"]) \n df = df.assign(**{\"type\":type_,\n \"file\":df.audio_code.apply(lambda x: \n self._create_path(path_type,x))\n })\n return df.filter([\"file\", \"text\", \"type\"])\n \n def make_tidy(self):\n pass\n \n def parse_data(self) -> pd.DataFrame:\n \n df_train = self._parse_type(self.train_path, \"train\")\n df_test = self._parse_type(self.test_path, \"test\")\n df_dev = self._parse_type(self.dev_path, \"dev\")\n \n return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename)\n\nclass CommonVoice(DataBase):\n \n ext = \".mp3\" \n basename = \"common_voice\"\n \n def __init__(self, main_path):\n\n self.train_path = os.path.join(main_path, \"train.tsv\")\n self.test_path = os.path.join(main_path, \"test.tsv\")\n self.dev_path = os.path.join(main_path, \"validated.tsv\")\n self.audios_path = os.path.join(main_path, \"clips\")\n \n def _create_path(self, audio_name):\n return os.path.join(self.audios_path, audio_name)\n\n def _parse_type(self, df_path, type_): \n \n df = pd.read_csv(df_path, sep = \"\\t\")\n return (df.assign(**{\"type\":type_, \"file\":df[\"path\"].apply(self._create_path)})\n .rename(columns={\"sentence\":\"text\"})\n .filter([\"file\", \"text\", \"type\"]))\n \n def make_tidy(self):\n pass\n \n def parse_data(self) -> pd.DataFrame: \n \n df_train = self._parse_type(self.train_path, \"train\")\n df_test = self._parse_type(self.test_path,\"test\")\n df_dev = self._parse_type(self.dev_path,\"dev\")\n \n return pd.concat([df_train, df_test, df_dev], ignore_index=True).assign(base=self.basename)" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
saileshpuranam/DSND----Image-Classifier-Deep-Learning
[ "ea472161670a1a2586f55be4f766be9b287ee761" ]
[ "train.py" ]
[ "import argparse\nimport data_utils\nimport network_utils\n\nimport matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom PIL import Image\nimport seaborn as sns\nimport json\nfrom collections import OrderedDict\n\n\n# Define All the functions required for training the model\n\n# Function arg_parser() parses keyword arguments from the CMD\ndef arg_parser():\n # Define parser\n parser = argparse.ArgumentParser(description=\"Neural Network Settings\")\n \n # Add architecture selection to parser\n parser.add_argument('--arch', \n type=str, \n help='Choose architecture from torchvision.models as str')\n \n # Add checkpoint directory to parser\n parser.add_argument('--save_dir', \n type=str, \n help='Define save directory for checkpoints as str. If not specified then model will be lost.')\n \n # Add hyperparameter tuning to parser\n parser.add_argument('--learning_rate', \n type=float, \n help='Define gradient descent learning rate as float')\n parser.add_argument('--hidden_units', \n type=int, \n help='Hidden units for DNN classifier as int')\n parser.add_argument('--epochs', \n type=int, \n help='Number of epochs for training as int')\n\n # Add GPU Option to parser\n parser.add_argument('--gpu', \n action=\"store_true\", \n help='Use GPU + Cuda for calculations')\n \n # Parse args\n args = parser.parse_args()\n return args\n\n# Function train_transformer to do training transformations on a dataset\ndef train_transformer(train_dir):\n # Define transformation\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n # Load the Data\n train_data = datasets.ImageFolder(train_dir, transform=train_transforms)\n return train_data\n\n# Function test_transformer(test_dir) performs test/validation transformations on a dataset\ndef test_transformer(test_dir):\n # Define transformation\n test_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n # Load the Data\n test_data = datasets.ImageFolder(test_dir, transform=test_transforms)\n return test_data\n\n# Function test_transformer(test_dir) performs test/validation transformations on a dataset\ndef valid_transformer(valid_dir):\n # Define transformation\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])])\n # Load the Data\n valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n return valid_data\n\n# Function to create a dataloader from dataset imported\ndef data_loader(data, train=True):\n if train: \n loader = torch.utils.data.DataLoader(data, batch_size=50, shuffle=True)\n else: \n loader = torch.utils.data.DataLoader(data, batch_size=50)\n return loader\n\n# Function check_gpu\ndef check_gpu(gpu_arg):\n # If gpu_arg is false then simply return the cpu device\n if not gpu_arg:\n return torch.device(\"cpu\")\n \n # If gpu_arg then make sure to check for CUDA before assigning it\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n # Print result\n if device == \"cpu\":\n print(\"CUDA was not found on 
device, using CPU instead.\")\n    return device\n\n# Loader function downloads vgg16 from Torch vision\ndef primaryloader_model(architecture=\"vgg16\"):\n    # Load Defaults if none specified\n    if type(architecture) == type(None):\n        model = models.vgg16(pretrained=True)\n        model.name = \"vgg16\"\n        print(\"Network architecture specified as vgg16.\")\n    else:\n        # Use getattr rather than exec(): exec() cannot rebind the local name 'model' in Python 3\n        model = getattr(models, architecture)(pretrained=True)\n        model.name = architecture\n    \n    # Freeze model parameters so we can access them at a later point\n    for param in model.parameters():\n        param.requires_grad = False\n    return model\n\n# Function initial_classifier(model, hidden_units) creates a classifier\ndef initial_classifier(model, hidden_units):\n    # Check that hidden layers have been input\n    if type(hidden_units) == type(None):\n        hidden_units = 4096 # hyperparameter default\n        print(\"Number of Hidden Layers specified as 4096.\")\n    \n    # Find input layer size on the already-loaded model (avoids a second download and a NameError)\n    if model.name == \"vgg16\":\n        input_features = model.classifier[0].in_features\n    elif model.name == \"resnet18\":\n        input_features = model.fc.in_features\n    elif model.name == \"alexnet\":\n        input_features = model.classifier[6].in_features\n    \n    # Define Classifier params\n    classifier = nn.Sequential(OrderedDict([\n        ('fc1', nn.Linear(input_features, hidden_units, bias=True)),\n        ('relu1', nn.ReLU()),\n        ('dropout1', nn.Dropout(p=0.5)),\n        ('fc2', nn.Linear(hidden_units, 102, bias=True)),\n        ('output', nn.LogSoftmax(dim=1))\n    ]))\n    return classifier\n\n# Function validation(model, testloader, criterion, device) validates training against testloader to return loss and accuracy\ndef validation(model, testloader, criterion, device):\n    test_loss = 0\n    accuracy = 0\n    \n    for ii, (inputs, labels) in enumerate(testloader):\n        \n        inputs, labels = inputs.to(device), labels.to(device)\n        \n        output = model.forward(inputs)\n        test_loss += criterion(output, labels).item()\n        \n        ps = torch.exp(output)\n        equality = (labels.data == ps.max(dim=1)[1])\n        accuracy += equality.type(torch.FloatTensor).mean()\n    return test_loss, accuracy\n\n# Function network_trainer represents the training of the network model\ndef network_trainer(Model, Trainloader, Testloader, Device, \n                  criterion, optimizer, Epochs, Print_every, Steps):\n    # Check Model Kwarg\n    if type(Epochs) == type(None):\n        Epochs = 5\n        print(\"Number of Epochs specified as 5.\")\n \n    print(\"Training process initializing .....\\n\")\n\n    # Train Model\n    for e in range(Epochs):\n        running_loss = 0\n        Model.train() # Technically not necessary, setting this for good measure\n        \n        for ii, (inputs, labels) in enumerate(Trainloader):\n            Steps += 1\n            \n            inputs, labels = inputs.to(Device), labels.to(Device)\n            \n            optimizer.zero_grad()\n            \n            # Forward and backward passes\n            outputs = Model.forward(inputs)\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n        \n            running_loss += loss.item()\n        \n            if Steps % Print_every == 0:\n                Model.eval()\n\n                with torch.no_grad():\n                    valid_loss, accuracy = validation(Model, Testloader, criterion, Device)\n            \n                print(\"Epoch: {}/{} | \".format(e+1, Epochs),\n                     \"Training Loss: {:.4f} | \".format(running_loss/Print_every),\n                     \"Validation Loss: {:.4f} | \".format(valid_loss/len(Testloader)),\n                     \"Validation Accuracy: {:.4f}\".format(accuracy/len(Testloader)))\n            \n                running_loss = 0\n                Model.train()\n\n    return Model\n\n#Function validate_model(Model, Testloader, Device) validates the above model on test data images\ndef 
validate_model(Model, Testloader, Device):\n    # Do validation on the test set\n    correct = 0\n    total = 0\n    with torch.no_grad():\n        Model.eval()\n        for data in Testloader:\n            images, labels = data\n            images, labels = images.to(Device), labels.to(Device)\n            outputs = Model(images)\n            _, predicted = torch.max(outputs.data, 1)\n            total += labels.size(0)\n            correct += (predicted == labels).sum().item()\n    \n    print('Accuracy achieved by the network on test images is: %d%%' % (100 * correct / total))\n\n# Function initial_checkpoint(Model, Save_Dir, Train_data) saves the model at a defined checkpoint\ndef initial_checkpoint(Model, Save_Dir, Train_data,Epochs,Learning_rate):\n    \n    # Save model at checkpoint\n    if type(Save_Dir) == type(None):\n        print(\"Model checkpoint directory not specified, model will not be saved.\")\n    else:\n        Model.class_to_idx = Train_data.class_to_idx\n        \n        # Create checkpoint dictionary\n        checkpoint = {'architecture':Model.name,\n                      'classifier':Model.classifier,\n                      'state_dict':Model.state_dict(),\n                      'epochs':Epochs,\n                      'learning_rate':Learning_rate,\n                      'class_to_idx':Model.class_to_idx\n                     }\n        \n        # Save checkpoint\n        torch.save(checkpoint, 'my_checkpoint.pth')\n    \n\n# Main Function which executes all pre defined functions above to have the trained model ready\ndef main():\n    \n    # Get Keyword Args for Training\n    args = arg_parser()\n    \n    # Set directory for training\n    data_dir = 'flowers'\n    train_dir = data_dir + '/train'\n    valid_dir = data_dir + '/valid'\n    test_dir = data_dir + '/test'\n    \n    # Pass transforms in, then create trainloader\n    train_data = train_transformer(train_dir)\n    valid_data = valid_transformer(valid_dir)\n    test_data = test_transformer(test_dir)\n    \n    trainloader = data_loader(train_data)\n    validloader = data_loader(valid_data, train=False)\n    testloader = data_loader(test_data, train=False)\n    \n    # Load Model\n    model = primaryloader_model(architecture=args.arch)\n    \n    # Build Classifier\n    model.classifier = initial_classifier(model, \n                                          hidden_units=args.hidden_units)\n    \n    # Check for GPU\n    device = check_gpu(gpu_arg=args.gpu)\n    \n    # Send model to device\n    model.to(device)\n    \n    # Check for learnrate args\n    if type(args.learning_rate) == type(None):\n        learning_rate = 0.001\n        print(\"Learning rate specified as 0.001\")\n    else: learning_rate = args.learning_rate\n    \n    # Define loss and optimizer\n    criterion = nn.NLLLoss()\n    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)\n    \n    # Define deep learning method\n    print_every = 30\n    steps = 0\n    \n\n    \n    # Train the classifier layers using backpropagation\n    trained_model = network_trainer(model, trainloader, validloader, \n                                  device, criterion, optimizer, args.epochs, \n                                  print_every, steps)\n    \n    print(\"\\nTraining process is now complete!!\")\n    \n    # Validate the model\n    validate_model(trained_model, testloader, device)\n    \n    # Save the model\n    initial_checkpoint(trained_model, args.save_dir, train_data, args.epochs,args.learning_rate)\n\n\n# =============================================================================\n# Run Program\n# =============================================================================\nif __name__ == '__main__': \n    main()" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.max", "torch.utils.data.DataLoader", "torch.exp", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.nn.ReLU", "torch.save" ] ]
rinikerlab/reeds
[ "d5d13eaf844d8f78456634f68455ba6239a106de" ]
[ "reeds/function_libs/pipeline/worker_scripts/analysis_workers/RE_EDS_explore_lowerBound_analysis.py" ]
[ "#!/usr/bin/python env\nimport glob\nimport os\n\nimport numpy as np\n\nfrom pygromos.files import imd\nfrom pygromos.utils import bash\nfrom reeds.function_libs.analysis.sampling import undersampling_occurence_potential_threshold_distribution_based as find_undersampling_pot_tresh\n\n\nimport reeds.function_libs.analysis.sampling\nimport reeds.function_libs.visualization.pot_energy_plots\nfrom reeds.function_libs.file_management import file_management as fM\nfrom reeds.function_libs.analysis.sampling import sampling_analysis\nimport reeds.function_libs.utils.s_log_dist as s_log_dist\n\nnp.set_printoptions(suppress=True,formatter={'float_kind':'{:0.7f}'.format})\nfrom reeds.data import ene_ana_libs\n\ndef do(out_analysis_dir: str, system_name: str,\n in_simulation_dir: str, in_topology_path: str, in_imd_path: str,\n undersampling_occurrence_fraction_threshold: float = 0.9,\n gromosPP_bin: str = None, final_number_of_replicas : int= None,\n in_ene_ana_lib: str = ene_ana_libs.ene_ana_lib_path,\n verbose: bool = True):\n \"\"\"\n This analysis worker is used to analyse the exploration of the b_step: high s-undersampling boundary.\n\n Features:\n - test stability of simulations with starting coordinates\n - find turning point of transition-phase -> undersampling\n - determine potential thresholds for undersampling region.\n\n Parameters\n ----------\n out_analysis_dir : str\n output dir for analysis data-results\n system_name : str\n name of the system\n in_simulation_dir : str\n simulation dir path contains the gromos simulation data\n in_topology_path : str\n path to the gromos topology\n in_imd_path : str\n path to one gromos paramter file, that was used\n undersampling_pot_tresh : float, optional\n initial undersampling potential threshold (<-OLD)\n gromosPP_bin : str, optional\n path to the gromosPP binary folder\n in_ene_ana_lib : str, optional\n path to the ene_ana library\n verbose : bool, optional\n Sing, Sing, Sing, ....\n\n Returns\n -------\n\n \"\"\"\n control_dict = {\n \"cp_cnf\": True,\n \"cat_trc\": False,\n \"convert_trcs\": False,\n \"remove_gromosTRC\": True,\n \"cat_tre\": False,\n \"ene_ana\": True,\n \"cat_repdat\": False,\n \"pot_ene_by_replica\": True,\n \"pot_ene_by_state\": True,\n \"plot_pot_ene_timeseries\": True,\n \"plot_ref_timeseries\": True,\n \"plot_ref_distrib\": True\n }\n\n if (verbose): print(\"out: \", out_analysis_dir)\n bash.make_folder(out_analysis_dir)\n\n # global vars\n out_prefix = system_name\n data_dir = out_analysis_dir + \"/data\"\n imd_file = imd.Imd(in_imd_path + \"_1.imd\")\n num_states = int(imd_file.EDS.NUMSTATES)\n\n # Read in all s-values that were used\n\n imd_files = sorted(glob.glob(in_imd_path + \"*.imd\"), key=lambda x: int(x.split(\"_\")[-1].replace(\".imd\", \"\")))\n s_values = [float((imd.Imd(f)).EDS.S) for f in imd_files]\n\n # Count the number of simulations wich were succesful\n if (verbose): print(\"START file organization\")\n if (os.path.exists(in_simulation_dir)):\n succsessful_sim_count = 0\n print(\"all_omds: \", glob.glob(in_simulation_dir + \"/*.omd\"))\n successfull_files = []\n for omd_file_path in sorted(glob.glob(in_simulation_dir + \"/*.omd\"),\n key=lambda x: int(x.split(\"_\")[-1].replace(\".omd\", \"\"))):\n found_success = False\n for line in reversed(list(open(omd_file_path, \"r\"))):\n if \"successfully\" in line:\n succsessful_sim_count += 1\n found_success = True\n successfull_files.append(omd_file_path)\n break\n if (not found_success):\n print(\"Stop : \", succsessful_sim_count)\n break\n\n 
print(\"Successful sims: \" + str(succsessful_sim_count), \" out of \", len(s_values))\n print(\"Files: \", successfull_files)\n bash.make_folder(data_dir, additional_option=\"-p\")\n\n # organize simulatoin Files:\n if (os.path.exists(in_simulation_dir)):\n fM.project_concatenation(in_folder=in_simulation_dir, in_topology_path=in_topology_path,\n additional_properties=[\"eR\"] + [\"e\" + str(i) for i in range(1, num_states + 1)],\n in_imd=in_imd_path + \"_1.imd\", num_replicas=len(s_values[:succsessful_sim_count]),\n control_dict=control_dict, out_folder=data_dir, in_ene_ana_lib_path=in_ene_ana_lib,\n out_file_prefix=out_prefix, gromosPP_bin_dir=gromosPP_bin)\n\n elif (os.path.exists(data_dir) and os.path.exists(in_simulation_dir + \".tar.gz\")):\n cnfs = glob.glob(data_dir + \"/*.cnf\")\n succsessful_sim_count = len(cnfs)\n\n else:\n raise IOError(\"could not find simulation dir or analysis dir!\")\n\n if (verbose): print(\"START analysis\")\n # do sampling_plot\n out_analysis_plot_dir = out_analysis_dir + \"/plots\"\n bash.make_folder(out_analysis_plot_dir, \"-p\")\n ene_trajs = fM.parse_csv_energy_trajectories(data_dir, out_prefix) # gather potentials\n\n state_undersampling_pot_treshold = find_undersampling_pot_tresh(ene_traj_csvs=ene_trajs, sampling_fraction_treshold = undersampling_occurrence_fraction_threshold)\n\n sampling_analysis_results, out_plot_dirs = reeds.function_libs.analysis.sampling.detect_undersampling(out_path = out_analysis_plot_dir,\n ene_traj_csvs = ene_trajs, eoffs= [0 for _ in \n range(num_states)],\n s_values = s_values[:succsessful_sim_count],\n state_potential_treshold=state_undersampling_pot_treshold)\n\n # Plotting the different potential energy distributions\n if control_dict[\"pot_ene_by_state\"]:\n for i in range(num_states):\n outfile = out_analysis_plot_dir + '/' + system_name + '_pot_ene_state_' + str(i+1) + '.png'\n reeds.function_libs.visualization.pot_energy_plots.plot_energy_distribution_by_state(energy_trajs=ene_trajs, outfile=outfile, state_num=i + 1, s_values=s_values)\n \n if control_dict[\"pot_ene_by_replica\"]:\n for i in range(len(ene_trajs)):\n outfile = out_analysis_plot_dir + '/' + system_name + '_pot_ene_replica_' + str(i+1) + '.png'\n reeds.function_libs.visualization.pot_energy_plots.plot_energy_distribution_by_replica(traj_data=ene_trajs[i], outfile_path=outfile, replica_num=i + 1, s_value=s_values[i])\n \n if control_dict[\"plot_ref_timeseries\"]:\n outfile = out_analysis_plot_dir + '/' + system_name + '_ref_pot_ene_timeseries.png'\n reeds.function_libs.visualization.pot_energy_plots.plot_ref_pot_ene_timeseries(ene_trajs=ene_trajs, outfile=outfile, s_values=s_values)\n\n if control_dict[\"plot_ref_distrib\"]:\n outfile = out_analysis_plot_dir + '/' + system_name + '_ref_pot_ene_distrib.png'\n reeds.function_libs.visualization.pot_energy_plots.plot_ref_pot_energy_distribution(energy_trajs=ene_trajs, outfile=outfile, s_values=s_values)\n\n # plot the potential energy timeseries as a grid:\n if control_dict[\"plot_pot_ene_timeseries\"]:\n for i, ene_traj in enumerate(ene_trajs):\n out_path = out_analysis_plot_dir + '/' + system_name + '_pot_ene_timeseries_' + str(i+1) + '.png'\n title = 'Lower Bound Analysis potential energy timeseries - s = ' + str(s_values[i])\n reeds.function_libs.visualization.pot_energy_plots.plot_sampling_grid(traj_data = ene_traj, y_range = (-1000, 1000),\n out_path = out_path, title = title)\n\n # Preparing input for the energy offset run\n if (verbose): print(\"Start next folder\")\n out_analysis_next_dir = 
out_analysis_dir + \"/next\"\n bash.make_folder(out_analysis_next_dir, \"-p\")\n\n print(sampling_analysis_results)\n u_idx = sampling_analysis_results[\"undersamplingThreshold\"]\n # Make the new s-distribution based on this \n print(\"undersampling found after replica: \" + str(u_idx) + ' with s = ' + str(s_values[u_idx])) \n print('New s distribution will place ' + str(num_states) + ' replicas between s = ' + str(s_values[u_idx]) + ' and s = ' +str(s_values[u_idx+3]))\n\n if(not final_number_of_replicas is None):\n new_sdist = list(s_log_dist.get_log_s_distribution_between(s_values[0],s_values[u_idx-2], final_number_of_replicas-num_states))\n lower_sdist = list(s_log_dist.get_log_s_distribution_between(s_values[u_idx-1], s_values[u_idx], num_states))\n new_sdist.extend(lower_sdist)\n else:\n new_sdist = s_values[:u_idx-2]\n lower_sdist = s_log_dist.get_log_s_distribution_between(s_values[u_idx-1], s_values[u_idx], num_states)\n new_sdist.extend(lower_sdist)\n\n # Write the s-values to a csv file\n out_file = open(out_analysis_next_dir + \"/s_vals.csv\", \"w\")\n out_file.write(\"\\t\".join(list(map(str, new_sdist))))\n out_file.write(\"\\n\")\n out_file.close()\n\n # Write the potential energy thresholds to a csv file\n out_file = open(out_analysis_next_dir + \"/state_occurence_pot_thresh.csv\", \"w\")\n out_file.write(\"\\t\".join(map(str, sampling_analysis_results[\"potentialThreshold\"])))\n out_file.write(\"\\n\")\n out_file.close()\n\n # Coordinates:\n cnfs = list(sorted(glob.glob(data_dir + \"/*.cnf\"), key=lambda x: int(x.split(\"_\")[-1].replace(\".cnf\", \"\"))))\n if(len(s_values) != len(cnfs)):\n fM.adapt_cnfs_to_new_sDistribution(in_old_svals=s_values[:u_idx], in_new_svals=new_sdist, in_cnf_files=cnfs[:u_idx], out_cnf_dir=out_analysis_next_dir, cnf_prefix=system_name+\"_lower_bound\")\n\n # compress out_trc/out_tre Files & simulation dir\n trx_files = glob.glob(data_dir + \"/*.tr?\")\n for trx in trx_files:\n bash.compress_gzip(in_path=trx)\n\n if (not os.path.exists(in_simulation_dir + \".tar.gz\") and os.path.exists(in_simulation_dir)):\n tar_sim_dir = bash.compress_tar(in_path=in_simulation_dir, gunzip_compression=True, )\n bash.wait_for_fileSystem(tar_sim_dir)\n bash.remove_file(in_simulation_dir, additional_options=\"-r\")\n" ]
[ [ "numpy.set_printoptions" ] ]
dominik-steenken/qiskit-aqua
[ "bba4c02040ccf45b066f67398407e3e6382458b4", "bba4c02040ccf45b066f67398407e3e6382458b4" ]
[ "qiskit/aqua/components/oracles/truth_table_oracle.py", "qiskit/aqua/algorithms/classical/exactlpsolver/exactlpsolver.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nThe Truth Table-based Quantum Oracle.\n\"\"\"\n\nimport logging\nimport operator\nimport math\nimport numpy as np\nfrom functools import reduce\n\nfrom dlx import DLX\nfrom pyeda.inter import exprvars, And, Xor\nfrom qiskit import QuantumRegister, QuantumCircuit\n\nfrom qiskit.aqua import AquaError\nfrom qiskit.aqua.circuits import ESOP\nfrom qiskit.aqua.components.oracles import Oracle\nfrom qiskit.aqua.utils.arithmetic import is_power_of_2\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_prime_implicants(ones=None, dcs=None):\n \"\"\"\n Compute all prime implicants for a truth table using the Quine-McCluskey Algorithm\n\n Args:\n ones (list of int): The list of integers corresponding to '1' outputs\n dcs (list of int): The list of integers corresponding to don't-cares\n\n Return:\n list of lists of int, representing all prime implicants\n \"\"\"\n\n def combine_terms(terms, num1s_dict=None):\n if num1s_dict is None:\n num1s_dict = {}\n for num in terms:\n num1s = bin(num).count('1')\n if not num1s in num1s_dict:\n num1s_dict[num1s] = [num]\n else:\n num1s_dict[num1s].append(num)\n\n new_implicants = {}\n new_num1s_dict = {}\n prime_dict = {mt: True for mt in sorted(terms)}\n cur_num1s, max_num1s = min(num1s_dict.keys()), max(num1s_dict.keys())\n while cur_num1s < max_num1s:\n if cur_num1s in num1s_dict and (cur_num1s + 1) in num1s_dict:\n for cur_term in sorted(num1s_dict[cur_num1s]):\n for next_term in sorted(num1s_dict[cur_num1s + 1]):\n if isinstance(cur_term, int):\n diff_mask = dc_mask = cur_term ^ next_term\n implicant_mask = cur_term & next_term\n elif isinstance(cur_term, tuple):\n if terms[cur_term][1] == terms[next_term][1]:\n diff_mask = terms[cur_term][0] ^ terms[next_term][0]\n dc_mask = diff_mask | terms[cur_term][1]\n implicant_mask = terms[cur_term][0] & terms[next_term][0]\n else:\n continue\n else:\n raise AquaError('Unexpected type: {}.'.format(type(cur_term)))\n if bin(diff_mask).count('1') == 1:\n prime_dict[cur_term] = False\n prime_dict[next_term] = False\n if isinstance(cur_term, int):\n cur_implicant = (cur_term, next_term)\n elif isinstance(cur_term, tuple):\n cur_implicant = tuple(sorted((*cur_term, *next_term)))\n else:\n raise AquaError('Unexpected type: {}.'.format(type(cur_term)))\n new_implicants[cur_implicant] = (\n implicant_mask,\n dc_mask\n )\n num1s = bin(implicant_mask).count('1')\n if not num1s in new_num1s_dict:\n new_num1s_dict[num1s] = [cur_implicant]\n else:\n if not cur_implicant in new_num1s_dict[num1s]:\n new_num1s_dict[num1s].append(cur_implicant)\n cur_num1s += 1\n return new_implicants, new_num1s_dict, prime_dict\n\n terms = ones + dcs\n cur_num1s_dict = None\n\n prime_implicants = []\n\n while True:\n next_implicants, next_num1s_dict, cur_prime_dict = combine_terms(terms, num1s_dict=cur_num1s_dict)\n for implicant in cur_prime_dict:\n if 
cur_prime_dict[implicant]:\n if isinstance(implicant, int):\n if implicant not in dcs:\n prime_implicants.append((implicant,))\n else:\n if not set.issubset(set(implicant), dcs):\n prime_implicants.append(implicant)\n if next_implicants:\n terms = next_implicants\n cur_num1s_dict = next_num1s_dict\n else:\n break\n\n return prime_implicants\n\n\ndef get_exact_covers(cols, rows, num_cols=None):\n \"\"\"\n Use Algorithm X to get all solutions to the exact cover problem\n\n https://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X\n\n Args:\n cols (list of int): A list of integers representing the columns to be covered\n rows (list of list of int): A list of lists of integers representing the rows\n num_cols (int): The total number of columns\n\n Returns:\n All exact covers\n \"\"\"\n if num_cols is None:\n num_cols = max(cols) + 1\n ec = DLX([(c, 0 if c in cols else 1) for c in range(num_cols)])\n ec.appendRows([[c] for c in cols])\n ec.appendRows(rows)\n exact_covers = []\n for s in ec.solve():\n cover = []\n for i in s:\n cover.append(ec.getRowList(i))\n exact_covers.append(cover)\n return exact_covers\n\n\nclass TruthTableOracle(Oracle):\n\n CONFIGURATION = {\n 'name': 'TruthTableOracle',\n 'description': 'Truth Table Oracle',\n 'input_schema': {\n '$schema': 'http://json-schema.org/schema#',\n 'id': 'truth_table_oracle_schema',\n 'type': 'object',\n 'properties': {\n 'bitmaps': {\n \"type\": \"array\",\n \"default\": [],\n \"items\": {\n \"type\": \"string\"\n }\n },\n \"optimization\": {\n \"type\": \"string\",\n \"default\": \"off\",\n 'oneOf': [\n {\n 'enum': [\n 'off',\n 'qm-dlx'\n ]\n }\n ]\n },\n 'mct_mode': {\n 'type': 'string',\n 'default': 'basic',\n 'oneOf': [\n {\n 'enum': [\n 'basic',\n 'advanced',\n 'noancilla',\n ]\n }\n ]\n },\n },\n 'additionalProperties': False\n }\n }\n\n def __init__(self, bitmaps, optimization='off', mct_mode='basic'):\n \"\"\"\n Constructor for Truth Table-based Oracle\n\n Args:\n bitmaps (str or [str]): A single binary string or a list of binary strings representing the desired\n single- and multi-value truth table.\n optimization (str): Optimization mode to use for minimizing the circuit.\n Currently, besides no optimization ('off'), Aqua also supports a 'qm-dlx' mode,\n which uses the Quine-McCluskey algorithm to compute the prime implicants of the truth table,\n and then compute an exact cover to try to reduce the circuit.\n mct_mode (str): The mode to use when constructing multiple-control Toffoli.\n \"\"\"\n if isinstance(bitmaps, str):\n bitmaps = [bitmaps]\n\n self.validate(locals())\n super().__init__()\n\n self._mct_mode = mct_mode\n self._optimization = optimization\n\n self._bitmaps = bitmaps\n\n # check that the input bitmaps length is a power of 2\n if not is_power_of_2(len(bitmaps[0])):\n raise AquaError('Length of any bitmap must be a power of 2.')\n for bitmap in bitmaps[1:]:\n if not len(bitmap) == len(bitmaps[0]):\n raise AquaError('Length of all bitmaps must be the same.')\n self._nbits = int(math.log(len(bitmaps[0]), 2))\n self._num_outputs = len(bitmaps)\n\n esop_exprs = []\n for bitmap in bitmaps:\n esop_expr = self._get_esop_ast(bitmap)\n esop_exprs.append(esop_expr)\n\n self._esops = [\n ESOP(esop_expr, num_vars=self._nbits) for esop_expr in esop_exprs\n ] if esop_exprs else None\n\n self.construct_circuit()\n\n def _get_esop_ast(self, bitmap):\n v = exprvars('v', self._nbits)\n\n def binstr_to_vars(binstr):\n return [\n (~v[x[1] - 1] if x[0] == '0' else v[x[1] - 1])\n for x in zip(binstr, reversed(range(1, self._nbits + 1)))\n 
][::-1]\n\n if self._optimization == 'off':\n expression = Xor(*[\n And(*binstr_to_vars(term)) for term in\n [np.binary_repr(idx, self._nbits) for idx, v in enumerate(bitmap) if v == '1']])\n else: # self._optimization == 'qm-dlx':\n ones = [i for i, v in enumerate(bitmap) if v == '1']\n if not ones:\n return ('const', 0,)\n dcs = [i for i, v in enumerate(bitmap) if v == '*' or v == '-' or v.lower() == 'x']\n pis = get_prime_implicants(ones=ones, dcs=dcs)\n cover = get_exact_covers(ones, pis)[-1]\n clauses = []\n for c in cover:\n if len(c) == 1:\n term = np.binary_repr(c[0], self._nbits)\n clause = And(*[\n v for i, v in enumerate(binstr_to_vars(term))\n ])\n elif len(c) > 1:\n c_or = reduce(operator.or_, c)\n c_and = reduce(operator.and_, c)\n _ = np.binary_repr(c_and ^ c_or, self._nbits)[::-1]\n clause = And(*[\n v for i, v in enumerate(binstr_to_vars(np.binary_repr(c_and, self._nbits))) if _[i] == '0'\n ])\n else:\n raise AquaError('Unexpected cover term size {}.'.format(len(c)))\n if clause:\n clauses.append(clause)\n expression = Xor(*clauses)\n\n raw_ast = expression.to_ast()\n idx_mapping = {\n u: v + 1 for u, v in zip(sorted(expression.usupport), [v.indices[0] for v in sorted(expression.support)])\n }\n\n if raw_ast[0] == 'and' or raw_ast[0] == 'or' or raw_ast[0] == 'xor':\n clauses = []\n for c in raw_ast[1:]:\n if c[0] == 'lit':\n clauses.append(('lit', (idx_mapping[c[1]]) if c[1] > 0 else (-idx_mapping[-c[1]])))\n elif (c[0] == 'or' or c[0] == 'and') and (raw_ast[0] != c[0]):\n clause = []\n for l in c[1:]:\n clause.append(('lit', (idx_mapping[l[1]]) if l[1] > 0 else (-idx_mapping[-l[1]])))\n clauses.append((c[0], *clause))\n else:\n raise AquaError('Unrecognized logic expression: {}'.format(raw_ast))\n elif raw_ast[0] == 'const' or raw_ast[0] == 'lit':\n return raw_ast\n else:\n raise AquaError('Unrecognized root expression type: {}.'.format(raw_ast[0]))\n ast = (raw_ast[0], *clauses)\n return ast\n\n @property\n def variable_register(self):\n return self._variable_register\n\n @property\n def ancillary_register(self):\n return self._ancillary_register\n\n @property\n def output_register(self):\n return self._output_register\n\n def construct_circuit(self):\n if self._circuit is not None:\n return self._circuit\n self._circuit = QuantumCircuit()\n self._output_register = QuantumRegister(self._num_outputs, name='o')\n if self._esops:\n for i, e in enumerate(self._esops):\n if e is not None:\n ci = e.construct_circuit(output_register=self._output_register, output_idx=i)\n self._circuit += ci\n self._variable_register = self._ancillary_register = None\n for qreg in self._circuit.qregs:\n if qreg.name == 'v':\n self._variable_register = qreg\n elif qreg.name == 'a':\n self._ancillary_register = qreg\n else:\n self._variable_register = QuantumRegister(self._nbits, name='v')\n self._ancillary_register = None\n self._circuit.add_register(self._variable_register, self._output_register)\n return self._circuit\n\n def evaluate_classically(self, measurement):\n assignment = [(var + 1) * (int(tf) * 2 - 1) for tf, var in zip(measurement[::-1], range(len(measurement)))]\n ret = [bitmap[int(measurement, 2)] == '1' for bitmap in self._bitmaps]\n if self._num_outputs == 1:\n return ret[0], assignment\n else:\n return ret, assignment\n", "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"The Exact LinearProblem algorithm.\"\"\"\n\nimport logging\n\nimport numpy as np\n\nfrom qiskit.aqua.algorithms import QuantumAlgorithm\nfrom qiskit.aqua import AquaError\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExactLPsolver(QuantumAlgorithm):\n \"\"\"The Exact LinearProblem algorithm.\"\"\"\n\n CONFIGURATION = {\n 'name': 'ExactLPsolver',\n 'description': 'ExactLPsolver Algorithm',\n 'classical': True,\n 'input_schema': {\n '$schema': 'http://json-schema.org/schema#',\n 'id': 'ExactLPsolver_schema',\n 'type': 'object',\n 'properties': {\n },\n 'additionalProperties': False\n },\n 'problems': ['linear_system']\n }\n\n def __init__(self, matrix=None, vector=None):\n \"\"\"Constructor.\n\n Args:\n matrix (array): the input matrix of linear system of equations\n vector (array): the input vector of linear system of equations\n \"\"\"\n self.validate(locals())\n super().__init__()\n self._matrix = matrix\n self._vector = vector\n self._ret = {}\n\n @classmethod\n def init_params(cls, params, algo_input):\n \"\"\"\n Initialize via parameters dictionary and algorithm input instance\n Args:\n params: parameters dictionary\n algo_input: LinearSystemInput instance\n \"\"\"\n if algo_input is None:\n raise AquaError(\"LinearSystemInput instance is required.\")\n\n matrix = algo_input.matrix\n vector = algo_input.vector\n if not isinstance(matrix, np.ndarray):\n matrix = np.asarray(matrix)\n if not isinstance(vector, np.ndarray):\n vector = np.asarray(vector)\n\n if matrix.shape[0] != len(vector):\n raise ValueError(\"Input vector dimension does not match input \"\n \"matrix dimension!\")\n if matrix.shape[0] != matrix.shape[1]:\n raise ValueError(\"Input matrix must be square!\")\n\n return cls(matrix, vector)\n\n def _solve(self):\n self._ret['eigvals'] = np.linalg.eig(self._matrix)[0]\n self._ret['solution'] = list(np.linalg.solve(self._matrix, self._vector))\n\n def _run(self):\n \"\"\"\n Run the algorithm to compute eigenvalues and solution.\n Returns:\n Dictionary of results\n \"\"\"\n self._solve()\n return self._ret\n" ]
[ [ "numpy.binary_repr" ], [ "numpy.asarray", "numpy.linalg.eig", "numpy.linalg.solve" ] ]
lim0606/pytorch-generative-multisensory-network
[ "646404db3f6fdad0c6663b861be747c1032ec291" ]
[ "utils/colors.py" ]
[ "'''\nmiscellaneous functions: colors\n'''\nimport torch\n\ndef rgb2gray(img):\n #R = img[:, 0:1, :, :]\n #G = img[:, 1:2, :, :]\n #B = img[:, 2:3, :, :]\n\n ##gray = 0.2989 * R + 0.5870 * G + 0.1140 * B\n #gray = 0.2125 * R + 0.7154 * G + 0.0721 * B\n #return gray\n\n h, s, v = rgb_to_hsv(img)\n return v\n\n'''\ncopied and modified from https://github.com/enthought/Python-2.7.3/blob/master/Lib/colorsys.py\n'''\n#def rgb_to_hsv(r, g, b):\ndef rgb_to_hsv(img):\n batch_size, nchannels, nheight, nwidth = img.size()\n r = 255*img[:, 0:1, :, :].contiguous().view(-1)\n g = 255*img[:, 1:2, :, :].contiguous().view(-1)\n b = 255*img[:, 2:3, :, :].contiguous().view(-1)\n\n # get brightness\n maxc = torch.max(r, torch.max(g, b))\n minc = torch.min(r, torch.min(g, b))\n v = maxc\n\n # get mask\n #if minc == maxc:\n # return 0.0, 0.0, v\n mask = minc == maxc\n\n # get saturation\n s = (maxc-minc) / maxc\n s.masked_fill_(mask, 0.)\n\n # get hue\n rc = (maxc-r) / (maxc-minc)\n gc = (maxc-g) / (maxc-minc)\n bc = (maxc-b) / (maxc-minc)\n h = v.new_zeros(v.size())\n #if r == maxc:\n # h = bc-gc\n rmask = r == maxc\n h[rmask] = (bc-gc)[rmask]\n #elif g == maxc:\n # h = 2.0+rc-bc\n gmask = g == maxc\n h[gmask] = (2.0+rc-bc)[gmask]\n #else:\n # h = 4.0+gc-rc\n bmask = b == maxc\n h[bmask] = (4.0+gc-rc)[bmask]\n h = (h/6.0) % 1.0\n h.masked_fill_(mask, 0.)\n\n # reshape\n h = h.view(batch_size, 1, nheight, nwidth)\n s = s.view(batch_size, 1, nheight, nwidth)\n v = v.view(batch_size, 1, nheight, nwidth)\n return h, s, v\n" ]
[ [ "torch.min", "torch.max" ] ]
calvynsiong/OpenCVPractise
[ "e71ee4b78896c7a3ce05d0d0f5eef89ca5125971" ]
[ "Personal/masking.py" ]
[ "import cv2 as cv\nimport numpy as np\n\n\nimg = cv.imread(\"./Photos/cats.jpg\")\nblank = np.zeros(img.shape[:2], dtype=\"uint8\")\ncv.imshow(\"Base\", img)\ncv.imshow(\"blank\", blank)\n\n\n# Mask\n# mask = cv.circle(blank, (img.shape[1]//2, img.shape[0]//2+45,), 100, 255, -1)\nmask = cv.rectangle(blank, (30, 30), (300, 300), 255, -1)\ncv.imshow(\"mask\", mask)\n\nmasked = cv.bitwise_or(img, img, mask=mask)\ncv.imshow(\"Masked\", masked)\n\ncv.waitKey(0)\n" ]
[ [ "numpy.zeros" ] ]
thomcom/dask-cudf
[ "1a7f538e88fabff35859489221cbd2d8e7026076", "1a7f538e88fabff35859489221cbd2d8e7026076" ]
[ "dask_cudf/tests/test_groupby.py", "dask_cudf/tests/test_core.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numba import cuda\n\nimport cudf as gd\nimport dask_cudf as dgd\n\n\ndef _gen_skewed_keys(nelem):\n \"\"\"Skewed keys to check a key will not split across multiple\n partitions even if the makes it unbalanced.\n \"\"\"\n skewed_size = int(nelem * 0.95)\n assert nelem > skewed_size\n reminaing_size = nelem - skewed_size\n\n xs = np.hstack(\n [\n np.random.randint(0, 2, size=skewed_size),\n np.random.randint(2, 10, size=reminaing_size),\n ]\n )\n\n np.random.shuffle(xs)\n return xs\n\n\ndef _gen_uniform_keys(nelem):\n xs = np.random.randint(0, 20, size=nelem)\n return xs\n\n\[email protected](reason=\"pandas/cudf groupbys are not consistent\")\[email protected](\"keygen\", [_gen_skewed_keys, _gen_uniform_keys])\ndef test_groupby_single_key(keygen):\n np.random.seed(0)\n\n nelem = 500\n npartitions = 10\n\n # Generate the keys\n xs = keygen(nelem)\n\n assert xs.size == nelem\n df = pd.DataFrame({\"x\": xs, \"z\": np.random.normal(size=nelem) + 1})\n gdf = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=npartitions)\n\n groups = dgf.groupby(by=[\"x\"]).count()\n got = groups.compute().to_pandas()\n\n # Check against expectation\n expect = df.groupby(by=[\"x\"], as_index=False).count()\n # Check keys\n np.testing.assert_array_equal(got.x, expect.x)\n # Check values\n np.testing.assert_array_equal(got.count_z, expect.z)\n\n\[email protected](reason=\"pandas/cudf groupbys are not consistent\")\[email protected](\"keygen\", [_gen_skewed_keys, _gen_uniform_keys])\ndef test_groupby_multi_keys(keygen):\n np.random.seed(0)\n\n nelem = 500\n npartitions = 10\n\n # Generate the keys\n xs = keygen(nelem)\n ys = keygen(nelem)\n\n assert xs.size == nelem\n assert ys.size == nelem\n df = pd.DataFrame({\"x\": xs, \"y\": ys, \"z\": np.random.normal(size=nelem) + 1})\n\n gdf = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=npartitions)\n\n groups = dgf.groupby(by=[\"x\", \"y\"]).count()\n got = groups.compute().to_pandas()\n\n # Check against expectation\n expect = df.groupby(by=[\"x\", \"y\"], as_index=False).count()\n # Check keys\n np.testing.assert_array_equal(got.x, expect.x)\n np.testing.assert_array_equal(got.y, expect.y)\n # Check values\n np.testing.assert_array_equal(got.count_z, expect.z)\n\n\ndef check_groupby_agg(agg):\n np.random.seed(0)\n\n nelem = 100\n npartitions = 3\n xs = _gen_uniform_keys(nelem)\n df = pd.DataFrame(\n {\n \"x\": xs,\n \"v1\": np.random.normal(size=nelem),\n \"v2\": np.random.normal(size=nelem),\n }\n )\n\n gdf = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=npartitions)\n\n gotgroup = dgf.groupby(by=\"x\")\n expgroup = df.groupby(by=\"x\", as_index=False)\n\n def add_prefix(x):\n return \"_\".join([agg, x])\n\n got = getattr(gotgroup, agg)().compute().to_pandas()\n exp = getattr(expgroup, agg)()\n np.testing.assert_array_almost_equal(getattr(got, add_prefix(\"v1\")), exp.v1)\n np.testing.assert_array_almost_equal(getattr(got, add_prefix(\"v2\")), exp.v2)\n\n\[email protected](reason=\"pandas/cudf groupbys are not consistent\")\[email protected](\"agg\", [\"count\", \"sum\", \"max\", \"min\"])\ndef test_groupby_agg(agg):\n check_groupby_agg(agg)\n\n\[email protected](reason=\"Not implemented by cudf yet\")\[email protected](\"agg\", [\"mean\", \"std\"])\ndef test_groupby_harder_agg(agg):\n check_groupby_agg(agg)\n\n\[email protected](\n reason=\"Fix needed from \\\n https://github.com/gpuopenanalytics/dask_cudf/pull/26\"\n)\[email 
protected](reason=\"Groupby apply not implemented in libgdf\")\ndef test_groupby_apply_grouped():\n np.random.seed(0)\n\n nelem = 100\n xs = _gen_uniform_keys(nelem)\n ys = _gen_uniform_keys(nelem)\n df = pd.DataFrame(\n {\n \"x\": xs,\n \"y\": ys,\n \"idx\": np.arange(nelem),\n \"v1\": np.random.normal(size=nelem),\n \"v2\": np.random.normal(size=nelem),\n }\n )\n\n gdf = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=2)\n\n def transform(y, v1, v2, out1):\n for i in range(cuda.threadIdx.x, y.size, cuda.blockDim.x):\n out1[i] = y[i] * (v1[i] + v2[i])\n\n grouped = dgf.groupby(by=[\"x\", \"y\"]).apply_grouped(\n transform, incols=[\"y\", \"v1\", \"v2\"], outcols={\"out1\": np.float64}\n )\n\n # Compute with dask\n dgd_grouped = grouped.compute().to_pandas()\n binning = {}\n for _, row in dgd_grouped.iterrows():\n binning[row.idx] = row\n\n # Emulate the operation with pandas\n def emulate(df):\n df[\"out1\"] = df.y * (df.v1 + df.v2)\n return df\n\n pd_groupby = df.groupby(by=[\"x\", \"y\"], sort=True, as_index=True).apply(emulate)\n\n # Check the result\n for _, expect in pd_groupby.iterrows():\n got = binning[expect.idx]\n\n attrs = [\"x\", \"y\", \"v1\", \"v2\", \"out1\"]\n for a in attrs:\n np.testing.assert_equal(getattr(got, a), getattr(expect, a))\n\n\[email protected](\n reason=\"Fix needed from \\\n https://github.com/gpuopenanalytics/dask_cudf/pull/26\"\n)\[email protected](reason=\"Groupby apply not implemented in libgdf\")\ndef test_groupby_apply():\n np.random.seed(0)\n\n nelem = 100\n xs = _gen_uniform_keys(nelem)\n ys = _gen_uniform_keys(nelem)\n df = pd.DataFrame(\n {\n \"x\": xs,\n \"y\": ys,\n \"idx\": np.arange(nelem),\n \"v1\": np.random.normal(size=nelem),\n \"v2\": np.random.normal(size=nelem),\n }\n )\n\n gdf = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=2)\n\n def transform(df):\n df[\"out1\"] = df.y * (df.v1 + df.v2)\n return df\n\n grouped = dgf.groupby(by=[\"x\", \"y\"]).apply(transform)\n\n # Compute with dask\n dgd_grouped = grouped.compute().to_pandas()\n binning = {}\n for _, row in dgd_grouped.iterrows():\n binning[row.idx] = row\n\n # Emulate the operation with pandas\n pd_groupby = df.groupby(by=[\"x\", \"y\"], sort=True, as_index=True).apply(transform)\n\n # Check the result\n for _, expect in pd_groupby.iterrows():\n got = binning[expect.idx]\n\n attrs = [\"x\", \"y\", \"v1\", \"v2\", \"out1\"]\n for a in attrs:\n np.testing.assert_equal(getattr(got, a), getattr(expect, a))\n\n\[email protected](\n reason=\"Fix needed from \\\n https://github.com/gpuopenanalytics/dask_cudf/pull/26\"\n)\ndef test_repeated_groupby():\n np.random.seed(0)\n\n nelem = 100\n df = pd.DataFrame()\n df[\"a\"] = _gen_uniform_keys(nelem)\n df[\"b\"] = _gen_uniform_keys(nelem)\n\n ref_df = gd.DataFrame.from_pandas(df)\n df = dgd.from_cudf(ref_df, npartitions=3)\n assert df.known_divisions\n\n df2 = df.groupby(\"a\").apply(lambda x: x)\n assert not df2.known_divisions\n\n got = df2.groupby(\"a\").apply(lambda x: x).compute().to_pandas()\n expect = ref_df.groupby(\"a\").apply(lambda x: x).to_pandas()\n\n def sort_content(df):\n return sorted(list(df.b))\n\n got = got.groupby(\"a\").apply(sort_content)\n expect = expect.groupby(\"a\").apply(sort_content)\n\n pd.util.testing.assert_series_equal(got, expect)\n\n\[email protected](reason=\"pandas/cudf groupbys are not consistent\")\[email protected](\"nelem\", [50, 100, 1000])\[email protected](\"npart\", [3, 4, 5, 10])\ndef test_groupby_tree_reduce_max(nelem, npart):\n np.random.seed(0)\n df = 
pd.DataFrame()\n df[\"a\"] = _gen_uniform_keys(nelem)\n df[\"b\"] = _gen_uniform_keys(nelem)\n\n ref_df = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(ref_df, npartitions=npart)\n got = dgf.groupby(\"a\").max().compute().to_pandas()\n expect = df.groupby(\"a\", as_index=False).max()\n\n pd.util.testing.assert_series_equal(expect.a, got.a)\n pd.util.testing.assert_series_equal(expect.b, got.max_b, check_names=False)\n\n\[email protected](\"nelem\", [50, 100, 1000])\[email protected](\"npart\", [3, 4, 5, 10])\ndef test_groupby_tree_reduce_multi_agg(nelem, npart):\n np.random.seed(0)\n df = pd.DataFrame()\n df[\"a\"] = _gen_uniform_keys(nelem)\n df[\"b\"] = _gen_uniform_keys(nelem)\n df[\"c\"] = _gen_uniform_keys(nelem)\n\n ref_df = gd.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(ref_df, npartitions=npart)\n got = dgf.groupby(\"a\").agg({\"b\": \"max\", \"c\": \"min\"}).compute().to_pandas()\n expect = df.groupby(\"a\", as_index=False).agg({\"b\": \"max\", \"c\": \"min\"})\n\n pd.util.testing.assert_series_equal(expect.a, got.a)\n pd.util.testing.assert_series_equal(expect.b, got.max_b, check_names=False)\n pd.util.testing.assert_series_equal(expect.c, got.min_c, check_names=False)\n", "import dask\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\nimport pytest\nfrom pandas.util.testing import assert_frame_equal\n\nimport cudf\nimport dask_cudf\nimport dask_cudf as dgd\n\n\ndef test_from_cudf():\n np.random.seed(0)\n\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=10000), \"y\": np.random.normal(size=10000)}\n )\n\n gdf = cudf.DataFrame.from_pandas(df)\n\n # Test simple around to/from dask\n ingested = dgd.from_cudf(gdf, npartitions=2)\n assert_frame_equal(ingested.compute().to_pandas(), df)\n\n # Test conversion to dask.dataframe\n ddf = ingested.to_dask_dataframe()\n assert_frame_equal(ddf.compute(), df)\n\n\ndef _fragmented_gdf(df, nsplit):\n n = len(df)\n\n # Split dataframe in *nsplit*\n subdivsize = n // nsplit\n starts = [i * subdivsize for i in range(nsplit)]\n ends = starts[1:] + [None]\n frags = [df[s:e] for s, e in zip(starts, ends)]\n return frags\n\n\ndef test_concat():\n np.random.seed(0)\n\n n = 1000\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=n), \"y\": np.random.normal(size=n)}\n )\n\n gdf = cudf.DataFrame.from_pandas(df)\n frags = _fragmented_gdf(gdf, nsplit=13)\n\n # Combine with concat\n concated = dgd.concat(frags)\n assert_frame_equal(df, concated.compute().to_pandas())\n\n\ndef test_append():\n np.random.seed(0)\n\n n = 1000\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=n), \"y\": np.random.normal(size=n)}\n )\n\n gdf = cudf.DataFrame.from_pandas(df)\n frags = _fragmented_gdf(gdf, nsplit=13)\n\n # Combine with .append\n head = frags[0]\n tail = frags[1:]\n\n appended = dgd.from_cudf(head, npartitions=1)\n for each in tail:\n appended = appended.append(each)\n\n assert_frame_equal(df, appended.compute().to_pandas())\n\n\ndef test_series_concat():\n np.random.seed(0)\n\n n = 1000\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=n), \"y\": np.random.normal(size=n)}\n )\n\n gdf = cudf.DataFrame.from_pandas(df)\n frags = _fragmented_gdf(gdf, nsplit=13)\n\n frags = [df.x for df in frags]\n\n concated = dgd.concat(frags).compute().to_pandas()\n assert isinstance(concated, pd.Series)\n np.testing.assert_array_equal(concated, df.x)\n\n\ndef test_series_append():\n np.random.seed(0)\n\n n = 1000\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=n), \"y\": 
np.random.normal(size=n)}\n )\n\n gdf = cudf.DataFrame.from_pandas(df)\n frags = _fragmented_gdf(gdf, nsplit=13)\n\n frags = [df.x for df in frags]\n\n appending = dgd.from_cudf(frags[0], npartitions=1)\n for frag in frags[1:]:\n appending = appending.append(frag)\n\n appended = appending.compute().to_pandas()\n assert isinstance(appended, pd.Series)\n np.testing.assert_array_equal(appended, df.x)\n\n\ndef test_query():\n np.random.seed(0)\n\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=10), \"y\": np.random.normal(size=10)}\n )\n gdf = cudf.DataFrame.from_pandas(df)\n expr = \"x > 2\"\n\n assert_frame_equal(gdf.query(expr).to_pandas(), df.query(expr))\n\n queried = dgd.from_cudf(gdf, npartitions=2).query(expr)\n\n got = queried.compute().to_pandas()\n expect = gdf.query(expr).to_pandas()\n\n assert_frame_equal(got, expect)\n\n\ndef test_head():\n np.random.seed(0)\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=100), \"y\": np.random.normal(size=100)}\n )\n gdf = cudf.DataFrame.from_pandas(df)\n dgf = dgd.from_cudf(gdf, npartitions=2)\n\n assert_frame_equal(dgf.head().to_pandas(), df.head())\n\n\ndef test_from_dask_dataframe():\n np.random.seed(0)\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=20), \"y\": np.random.normal(size=20)}\n )\n ddf = dd.from_pandas(df, npartitions=2)\n dgdf = dgd.from_dask_dataframe(ddf)\n got = dgdf.compute().to_pandas()\n expect = df\n\n np.testing.assert_array_equal(got.index.values, expect.index.values)\n np.testing.assert_array_equal(got.x.values, expect.x.values)\n np.testing.assert_array_equal(got.y.values, expect.y.values)\n\n\n@pytest.mark.parametrize(\"nelem\", [10, 200, 1333])\ndef test_set_index(nelem):\n with dask.config.set(scheduler=\"single-threaded\"):\n np.random.seed(0)\n # Use unique index range as the sort may not be stable-ordering\n x = np.arange(nelem)\n np.random.shuffle(x)\n df = pd.DataFrame({\"x\": x, \"y\": np.random.randint(0, nelem, size=nelem)})\n ddf = dd.from_pandas(df, npartitions=2)\n dgdf = dgd.from_dask_dataframe(ddf)\n\n expect = ddf.set_index(\"x\").compute()\n got = dgdf.set_index(\"x\").compute().to_pandas()\n\n np.testing.assert_array_equal(got.index.values, expect.index.values)\n np.testing.assert_array_equal(got.y.values, expect.y.values)\n assert got.columns == expect.columns\n\n\ndef assert_frame_equal_by_index_group(expect, got):\n assert sorted(expect.columns) == sorted(got.columns)\n assert sorted(set(got.index)) == sorted(set(expect.index))\n # Note: the set_index sort is not stable, so compare values per index group\n unique_values = sorted(set(got.index))\n for iv in unique_values:\n sr_expect = expect.loc[[iv]]\n sr_got = got.loc[[iv]]\n\n for k in expect.columns:\n # Sort each column before we compare them\n sorted_expect = sr_expect.sort_values(k)[k]\n sorted_got = sr_got.sort_values(k)[k]\n np.testing.assert_array_equal(sorted_expect, sorted_got)\n\n\n@pytest.mark.parametrize(\"nelem\", [10, 200, 1333])\ndef test_set_index_2(nelem):\n with dask.config.set(scheduler=\"single-threaded\"):\n np.random.seed(0)\n df = pd.DataFrame(\n {\n \"x\": 100 + np.random.randint(0, nelem // 2, size=nelem),\n \"y\": np.random.normal(size=nelem),\n }\n )\n expect = df.set_index(\"x\").sort_index()\n\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=4)\n res = dgf.set_index(\"x\") # sort by default\n got = res.compute().to_pandas()\n\n assert_frame_equal_by_index_group(expect, got)\n\n\ndef test_set_index_w_series():\n with dask.config.set(scheduler=\"single-threaded\"):\n nelem = 20\n np.random.seed(0)\n df = 
pd.DataFrame(\n {\n \"x\": 100 + np.random.randint(0, nelem // 2, size=nelem),\n \"y\": np.random.normal(size=nelem),\n }\n )\n expect = df.set_index(df.x).sort_index()\n\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=4)\n res = dgf.set_index(dgf.x) # sort by default\n got = res.compute().to_pandas()\n\n assert set(expect.columns) == set(got.columns)\n assert_frame_equal_by_index_group(expect, got)\n\n\ndef test_assign():\n np.random.seed(0)\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=20), \"y\": np.random.normal(size=20)}\n )\n\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=2)\n pdcol = pd.Series(np.arange(20) + 1000)\n newcol = dgd.from_cudf(cudf.Series(pdcol), npartitions=dgf.npartitions)\n out = dgf.assign(z=newcol)\n\n got = out.compute().to_pandas()\n assert_frame_equal(got.loc[:, [\"x\", \"y\"]], df)\n np.testing.assert_array_equal(got[\"z\"], pdcol)\n\n\n@pytest.mark.parametrize(\"data_type\", [\"int8\", \"int16\", \"int32\", \"int64\"])\ndef test_setitem_scalar_integer(data_type):\n np.random.seed(0)\n scalar = np.random.randint(0, 100, dtype=data_type)\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=20), \"y\": np.random.normal(size=20)}\n )\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=2)\n\n df[\"z\"] = scalar\n dgf[\"z\"] = scalar\n\n got = dgf.compute().to_pandas()\n np.testing.assert_array_equal(got[\"z\"], df[\"z\"])\n\n\n@pytest.mark.parametrize(\"data_type\", [\"float32\", \"float64\"])\ndef test_setitem_scalar_float(data_type):\n np.random.seed(0)\n scalar = np.random.randn(1).astype(data_type)[0]\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=20), \"y\": np.random.normal(size=20)}\n )\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=2)\n\n df[\"z\"] = scalar\n dgf[\"z\"] = scalar\n\n got = dgf.compute().to_pandas()\n np.testing.assert_array_equal(got[\"z\"], df[\"z\"])\n\n\ndef test_setitem_scalar_datetime():\n np.random.seed(0)\n scalar = np.int64(np.random.randint(0, 100)).astype(\"datetime64[ms]\")\n df = pd.DataFrame(\n {\"x\": np.random.randint(0, 5, size=20), \"y\": np.random.normal(size=20)}\n )\n dgf = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=2)\n\n df[\"z\"] = scalar\n dgf[\"z\"] = scalar\n\n got = dgf.compute().to_pandas()\n np.testing.assert_array_equal(got[\"z\"], df[\"z\"])\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda: tm.makeDataFrame().reset_index(),\n # tm.makeDataFrame(),\n tm.makeMixedDataFrame,\n tm.makeObjectSeries,\n tm.makeTimeSeries,\n ],\n)\ndef test_repr(func):\n pdf = func()\n if isinstance(pdf, pd.DataFrame):\n gdf = cudf.DataFrame.from_pandas(pdf)\n else:\n gdf = cudf.Series.from_pandas(pdf)\n # gddf = dd.from_pandas(gdf, npartitions=3, sort=False) # TODO\n gddf = dask_cudf.from_cudf(gdf, npartitions=3, sort=False)\n\n assert repr(gddf)\n if hasattr(pdf, \"_repr_html_\"):\n assert gddf._repr_html_()\n" ]
[ [ "numpy.random.seed", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.DataFrame", "numpy.random.shuffle", "numpy.testing.assert_array_equal", "numpy.random.normal", "numpy.random.randint" ], [ "numpy.random.seed", "numpy.arange", "numpy.random.shuffle", "numpy.testing.assert_array_equal", "pandas.util.testing.assert_frame_equal", "numpy.random.normal", "numpy.random.randn", "pandas.util.testing.makeDataFrame", "numpy.random.randint" ] ]
Sandeep-777/Triangulation
[ "00ac74f997ff025bbb99f2d25fdd4cbcef8397d7" ]
[ "PCD_WRITE.py" ]
[ "from numpy import transpose\nfrom Read_files import read_vtk2arr\n\n\ndef pcd_write(data, length, filename):\n my_file = open(filename, 'w')\n ptx = data[0]\n pty = data[1]\n ptz = data[2]\n comment = '# .PCD v.5 - Point Cloud Data file format\\n'\n header = 'VERSION .5\\n' + 'FIELDS x y z\\n' + 'SIZE 4 4 4\\n' + 'TYPE F F F\\n' + 'COUNT 1 1 1\\n' + \\\n 'WIDTH ' + str(length) + '\\nHEIGHT 1\\n' + 'POINTS ' + str(length) + '\\nDATA ascii\\n'\n my_file.write(comment)\n my_file.write(header)\n for j in range(length):\n if j != length:\n my_file.write('' + str(ptx[j]) + ' ' + str(pty[j]) + ' ' + str(ptz[j]) + '\\n')\n else:\n my_file.write('' + str(ptx[j]) + ' ' + str(pty[j]) + ' ' + str(ptz[j]))\n my_file.close()\n\n\ndef pcd_write_color(data, color, length, filename):\n my_file = open(filename, 'w')\n ptx = data[0]\n pty = data[1]\n ptz = data[2]\n blue = color[0]\n green = color[1]\n red = color[2]\n comment = '# .PCD v.7 - Point Cloud Data file format\\n'\n header = 'VERSION .7\\n' + 'FIELDS x y z rgb\\n' + 'SIZE 4 4 4 4\\n' + 'TYPE F F F F\\n' + 'COUNT 1 1 1 1\\n' + \\\n 'WIDTH ' + str(length) + '\\nHEIGHT 1\\n' 'VIEWPOINT 0 0 0 1 0 0 0\\n' + 'POINTS ' + str(length) + \\\n '\\nDATA ascii\\n'\n my_file.write(comment)\n my_file.write(header)\n for j in range(length):\n\n rgb = rgb2float(int(red[j]), int(green[j]), int(blue[j]))\n if j != length:\n my_file.write('' + str(ptx[j]) + ' ' + str(pty[j]) + ' ' + str(ptz[j]) + ' ' + str(rgb) + '\\n')\n else:\n my_file.write('' + str(ptx[j]) + ' ' + str(pty[j]) + ' ' + str(ptz[j]) + ' ' + str(rgb))\n my_file.close()\n\n\ndef rgb2float(r, g, b, a=0):\n return float(a << 24 | r << 16 | g << 8 | b)\n\n\ndef make_ply_file(filename, vertex, color, poly):\n my_file = open(filename, 'w')\n\n ptx = vertex[0]\n pty = vertex[1]\n ptz = vertex[2]\n\n clr = color[2]\n clg = color[1]\n clb = color[0]\n\n pt_no = len(transpose(color))\n poly_no = len(poly)\n\n header = 'ply\\n' + 'format ascii 1.0\\n' + 'comment author: Sandy\\n' + 'comment object: 3D surface\\n' + \\\n 'element vertex ' + str(len(transpose(color))) + '\\n' + 'property float x\\nproperty float y\\nproperty float z\\n' + \\\n 'property uchar red\\nproperty uchar green\\nproperty uchar blue\\n' + 'element face ' + str(len(poly)) + \\\n '\\nproperty list uchar int vertex_indices\\n' + 'end_header\\n'\n\n my_file.write(header)\n for j in range(pt_no):\n my_file.write(str(ptx[j]) + ' ' + str(pty[j]) + ' ' + str(ptz[j]) + ' ' + str(clr[j]) + ' ' +\n str(clg[j]) + ' ' + str(clb[j]) + '\\n')\n for j in range(poly_no):\n if j != poly_no:\n my_file.write(str(poly[j][0]) + ' ' + str(poly[j][1]) + ' ' + str(poly[j][2]) + ' ' + str(poly[j][3]) +\n '\\n')\n my_file.close()\n\n\ndef make_obj_file(filename, vertex, color, normal, poly):\n my_file = open(filename, 'w')\n\n clr = color[2]\n clg = color[1]\n clb = color[0]\n\n pt_no = len(transpose(vertex)) - 1\n poly_no = len(poly) - 1\n\n for j in range(pt_no):\n color_r = float(clr[j])\n color_g = float(clg[j])\n color_b = float(clb[j])\n color_r /= 255\n color_g /= 255\n color_b /= 255\n my_file.write('vn ' + \"{0:.6f}\".format(normal[0][j]) + ' ' + \"{0:.6f}\".format(normal[1][j]) + ' ' + \"{0:.6f}\".format(normal[2][j]) + '\\nv ' +\n \"{0:.6f}\".format(vertex[0][j]) + ' ' + \"{0:.6f}\".format(vertex[1][j]) + ' ' + \"{0:.6f}\".format(vertex[2][j]) + ' ' + \"{0:.6f}\".format(color_r) +\n ' ' + \"{0:.6f}\".format(color_g) + ' ' + \"{0:.6f}\".format(color_b) + '\\n')\n for j in range(poly_no):\n if j != poly_no:\n my_file.write('f ' + str(poly[j][1]) + '//' + 
str(poly[j][1]) + ' ' + str(poly[j][2]) + '//' +\n str(poly[j][2]) + ' ' + str(poly[j][3]) + '//' + str(poly[j][3]) + '\\n')\n else:\n my_file.write(str(poly[j][0]) + ' ' + str(poly[j][1]) + ' ' + str(poly[j][2]) + ' ' + str(poly[j][3]))\n my_file.close()\n\n\ndef make_obj_point(filename, vertex, color):\n my_file = open(filename, 'w')\n\n clr = color[2]\n clg = color[1]\n clb = color[0]\n\n pt_no = len(transpose(vertex)) - 1\n\n for j in range(pt_no):\n color_r = float(clr[j])\n color_g = float(clg[j])\n color_b = float(clb[j])\n color_r /= 255\n color_g /= 255\n color_b /= 255\n my_file.write('v ' + \"{0:.6f}\".format(vertex[0][j]) + ' ' + \"{0:.6f}\".format(vertex[1][j]) + ' ' +\n \"{0:.6f}\".format(vertex[2][j]) + ' ' + \"{0:.6f}\".format(color_r) +\n ' ' + \"{0:.6f}\".format(color_g) + ' ' + \"{0:.6f}\".format(color_b) + '\\n')\n my_file.close()\n\npoly = read_vtk2arr('color.vtk')\n\n" ]
[ [ "numpy.transpose" ] ]
nicole-dwenger/DDB_Tagger
[ "190c7f4d378c319e0f7553a8f7dd775d394087fc" ]
[ "src/DDB_tagger.py" ]
[ "\"\"\"\nSemantic Tagger using Danske Begrebsordbog.\nThis script can be used in three ways:\n\n1 Import DDB_Tagger Class in a notebook to tag a text:\n - from DDB_Tagger import DDB_Tagger # import class\n - Tagger = DDB_Tagger(dict=\"dict/dict.pkl, da_model=\"spacy\") # initialise tagger\n - results = Tagger.tag_text(input=\"this is your text\", input_file=False, only_tagged_results=False)\n\n2 Import DDB_Tagger Class in a notebook to all texts in a given directory:\n - from DDB_Tagger import DDB_Tagger # import class\n - Tagger = DDB_Tagger(dict=\"dict/dict.pkl, da_model=\"spacy\") # initialise tagger\n - results = Tagger.tag_directory(input_path=\"in/\", output_path=\"out/\", only_tagged_results=False)\n \n2 Run the script to tag all texts in a given dictionary:\n - from DDB_Tagger directory run from Terminal: python3 src/DDB_Tagger.py --input_directory \"in/\" --output_directory out/ --da_model \"spacy\" --dict \"dict/dict.pkl\"\n - Results will be saved in --output_directory\n\nBoth ways require that:\n - Requirements are installed install_requirements.sh\n - DDB dictionary is stored as pickle file in dict/dict.pkl\n\"\"\"\n\n# --- DEPENDENCIES ---\n\nimport os, sys, argparse, glob\nfrom tqdm import tqdm\nimport time\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom itertools import chain\n\n\n# --- HELPER FUNCTIONS ---\n\ndef jaccard_distance(context_tokens: list[str], category_tokens: list[str]):\n \"\"\"Simple function for calculating Jaccard Distance\n\n Args:\n context_tokens (list[str]): List of context words (i.e. target words)\n category_tokens (list[str]): List of words in category\n\n Returns:\n [float]: Value of jaccard distance\n\n Info:\n s1: The set of all words+POS ±5 from target word\n s2: The set of all words+POS from the top-level DDB category\n\n JD = (s1 ∪ s2) - (s1 ∩ s2) / (s1 ∪ s2)\n\n This can also be calculated using 1 - (s1 ∩ s2) / (s1 ∪ s2), where the \n latter expression is the Jaccard similarity.\n \"\"\"\n\n s1 = set(context_tokens)\n s2 = set(category_tokens)\n union = len(s1.union(s2))\n intersection = len(s1.intersection(s2))\n return (union-intersection) / union\n\n\n# --- DDB TAGGER CLASS ---\n\nclass DDB_tagger:\n\n def __init__(self, dict: str=\"dict/dict.pkl\", da_model: str=\"spacy\"):\n \"\"\"Initializing Semantic Tagger using Den Danske Begrebsordbog.\n\n Args:\n dict (str, optional): Path to semantic dictionary. Defaults to \"dict/dict.pkl\".\n da_model (str, optional): Danish Language Model to use, \"spacy\" or \"dacy\". Defaults to \"spacy\".\n \"\"\" \n\n # Save the base path of the file to keep paths relative\n self.base_path = os.path.dirname(__file__)\n\n # Load DDB dictionary\n self.DDB_dict = pickle.load(open(os.path.join(self.base_path, \"..\", dict), \"rb\"))\n\n # Load Danish language model\n if da_model == \"spacy\":\n import spacy\n self.nlp = spacy.load(\"da_core_news_sm\") # could be changed\n\n elif da_model == \"dacy\":\n import dacy\n self.nlp = dacy.load(\"medium\") # could be changed\n\n\n def tag_text(self, input:str, input_file: bool=False, only_top3_results: bool=True, only_tagged_results: bool=False):\n \"\"\"Processing and tagging a text using Den Danske Begrebsordbog.\n\n Args:\n input (str): String of input text or path to input file. \n input_file (bool, optional): Defines whether input is path (True) to input file or string of text (False). Defaults to False.\n only_top3_results (bool, optional): Defines whether only the top3 tags or all should be in the results. 
Defaults to True.\n only_tagged_results (bool, optional): Defines whether results should only contain tags (True), or also scores (False). Defaults to False.\n \"\"\" \n\n # --- PREPARE TEXT --- \n\n # If input is file, load file\n if input_file == True:\n with open(input, 'r') as f:\n text = f.read() \n f.close()\n\n # If input is string, use it as text\n elif input_file == False:\n text = input\n\n # --- TOKENIZE AND POS TAGGING --- \n\n # Tokenize and save with POS tags in tuple (what happens with nan POS tags?)\n token_pos = pd.DataFrame([(token.text, token.pos_) for token in self.nlp(text)], columns = [\"TOKEN\", \"POS\"])\n # Remove rows with NAN\n token_pos.dropna(inplace=True) \n # Remove rows with \"SPACE\" tag\n token_pos = token_pos[token_pos[\"POS\"] != \"SPACE\"]\n # Rename POS tag for \"at\", since it is only defined as PART in DDB\n token_pos.loc[token_pos['TOKEN'] == 'at', 'POS'] = 'KONJ'\n # Rename POS tags to match the DDB tags (i.e. converting universal POS to DDB POS)\n token_pos[\"POS\"].replace({\"CCONJ\": \"KONJ\", \n \"SCONJ\": \"KONJ\",\n \"AUX\": \"VERB\"}, inplace=True)\n\n # Reset index to account for deleted SPACE tags\n token_pos = token_pos.reset_index(drop=True)\n # Save index as column\n token_pos.insert(loc=0, column='ORIGINAL_IDX', value=token_pos.index)\n # Turning dataframe into dictionaries (with keys ORIGINAL_IDX, TOKEN, POS)\n token_dicts = token_pos.to_dict(\"records\")\n # Converting to a list of dicts without punctuation\n dicts_no_punc = [token_d for token_d in token_dicts if token_d[\"POS\"] !=\"PUNCT\"]\n # Also saving dataframe with punctuation, to later concatenate again\n dicts_punc = [dict(token_d, **{'DDB_TAGS': \"-\", \"DDB_TAGS_DISAMBIGUATED\": \"-\"}) for token_d in token_dicts if token_d[\"POS\"] == \"PUNCT\"]\n \n # --- INITIAL TAGGING ---\n\n tagged = []\n for idx, token_d in enumerate(dicts_no_punc):\n\n # Prepare target token\n target = token_d[\"TOKEN\"].lower() + \"_\" + token_d[\"POS\"]\n # Find DDB tags in which the target occurs\n target_tags = [tag for tag, tag_tokens in self.DDB_dict.items() if target in tag_tokens]\n \n # If token does not appear in any category, append \"-\"\n if len(target_tags) == 0:\n target_tags_scores = \"-\"\n\n # Otherwise calculate scores for categories\n else: \n # Get the context tokens of the target\n context_dicts = self.get_context(dicts_no_punc, idx)\n context = [token_d[\"TOKEN\"].lower() + \"_\" + token_d[\"POS\"] for token_d in context_dicts]\n\n # Calculate scores for possible categories based on context\n target_tags_scores = []\n for tag in target_tags: \n # Get the top level tag\n top_level_tag = tag.split(\"|\")[0] + \"|\"\n # Get all the tokens of the category\n top_level_tokens = list(chain.from_iterable([tag_tokens for tag, tag_tokens in self.DDB_dict.items() if top_level_tag in tag]))\n # Calculate distance of context words and category tokens\n score = jaccard_distance(context, top_level_tokens) \n # Append tuple of category and score\n target_tags_scores.append((tag, score))\n # Sort possibel tags by score\n target_tags_scores = sorted(target_tags_scores, key=lambda x: x[1])\n\n # Add results to dict \n target_tagged_dict = dict(token_d, **{'DDB_TAGS': target_tags_scores})\n # Append result for token to list of all tagged tokens\n tagged.append(target_tagged_dict)\n\n # --- DISAMBIGUATION OF IDENTICAL SCORES ---\n\n # Prepare file path to save disambiguation information (only saved if input was a file)\n if input_file == True:\n disambiguation_filepath = 
os.path.join(self.base_path, \"..\", \"out\", f\"disambiguation_info-{input.split('/')[1]}\")\n if os.path.exists(disambiguation_filepath):\n os.remove(disambiguation_filepath)\n elif input_file == False:\n disambiguation_filepath = None\n\n # Loop through tagged tokens\n tagged_disambiguated = []\n for idx, token_tagged_d in enumerate(tagged):\n \n # Get the possible tags for the given token\n tags_scores = token_tagged_d[\"DDB_TAGS\"]\n # Get only the scores of the possible tags\n scores = [ts[1] for ts in tags_scores if tags_scores != \"-\"]\n # Get duplicate scores (if there are any in the first 4 scores)\n duplicate_scores = list(set([s for s in scores[:4] if scores[:4].count(s) > 1]))\n\n # If there are any duplicate scores\n if len(duplicate_scores) >= 1:\n # Make a copy of the tags and scores to change the order\n tags_disambiguated = tags_scores.copy()\n\n # Loop over duplicate scores and disambiguate tags\n for score in duplicate_scores:\n duplicate_idxs = [tags_scores.index(ts) for ts in tags_scores if ts[1] == score]\n duplicate_tags_scores = [tags_scores[idx] for idx in duplicate_idxs]\n\n # Get the dictionaries of the context tokens\n context_tagged = self.get_context(tagged, idx)\n # Get the tags of the context tokens (only if they should not be disambiguated themselves)\n tags_context = []\n for token in context_tagged:\n # Get the tags of the token\n tags_token = token[\"DDB_TAGS\"]\n # Add the top1 tag, but only if it should not also be disambiguated\n if (len(tags_token) == 1 and tags_token != \"-\") or (len(tags_token) > 1 and tags_token[0][1] != tags_token[1][1]):\n # Add the tag of the first level tag to the tags_context list\n tags_context.append(token[\"DDB_TAGS\"][0][0])\n\n # Disambiguate the tags of the target based on the context (or size of the tag entry in DDB)\n duplicates_disambiguated = self.disambiguate_duplicates(token_tagged_d, duplicate_tags_scores, tags_context, disambiguation_filepath)\n\n # Fix the order while keeping the non-duplicates\n for idx, duplicate in enumerate(duplicates_disambiguated):\n new_idx = duplicate_idxs[idx]\n tags_disambiguated[new_idx] = duplicate\n \n # If no duplicates, the ordered is just the same as the original\n else:\n tags_disambiguated = tags_scores\n\n # Save results in a copy of the dictionary, to prevent overwriting and append\n token_disambiguated_d = token_tagged_d.copy()\n\n if only_tagged_results == False:\n token_disambiguated_d[\"DDB_TAGS\"] = tags_scores\n token_disambiguated_d[\"DDB_TAGS_DISAMBIGUATED\"] = tags_disambiguated\n\n elif only_tagged_results == True:\n token_disambiguated_d[\"DDB_TAGS\"] = [ts[0] for ts in tags_scores]\n token_disambiguated_d[\"DDB_TAGS_DISAMBIGUATED\"] = [ts[0] for ts in tags_disambiguated]\n\n tagged_disambiguated.append(token_disambiguated_d)\n\n # --- PREPARE OUTPUT ---\n\n # Put tagged punct and no punct into dataframes and join\n column_names = ['ORIGINAL_IDX', 'TOKEN', 'POS', 'DDB_TAGS', 'DDB_TAGS_DISAMBIGUATED']\n df_tagged = pd.DataFrame(tagged_disambiguated, columns=column_names) \n df_punc = pd.DataFrame(dicts_punc, columns=column_names)\n output = pd.concat([df_tagged, df_punc]).sort_values(\"ORIGINAL_IDX\").reset_index(drop=True)\n\n # Split tags into separate columns\n output['DDB1'] = output.apply(lambda row : row['DDB_TAGS_DISAMBIGUATED'][0], axis = 1)\n output['DDB2'] = output.apply(lambda row : row['DDB_TAGS_DISAMBIGUATED'][1] if len(row['DDB_TAGS_DISAMBIGUATED']) > 1 else np.nan, axis = 1)\n output['DDB3'] = output.apply(lambda row : 
row['DDB_TAGS_DISAMBIGUATED'][2] if len(row['DDB_TAGS_DISAMBIGUATED']) > 2 else np.nan, axis = 1)\n output['DDB4+'] = output.apply(lambda row : row['DDB_TAGS_DISAMBIGUATED'][3:] if len(row['DDB_TAGS_DISAMBIGUATED']) > 3 else np.nan, axis = 1)\n output = output.drop([\"DDB_TAGS\", \"DDB_TAGS_DISAMBIGUATED\"], axis=1)\n output = output.fillna(\"-\")\n\n # If only top three tags, drop the fourth column:\n if only_top3_results == True:\n output = output.drop([\"DDB4+\"], axis=1)\n \n # Return output\n return output\n\n def tag_directory(self, input_directory: str=\"in/\", output_directory: str=\"out/\", only_top3_results: bool=True, only_tagged_results: bool=False):\n \"\"\"Tagging all texts (.txt files) in a directory using the tag_text function.\n\n Args:\n input_directory (str, optional): Input directory containing .txt files. Defaults to \"in/\".\n output_directory (str, optional): Output directory to save results. Defaults to \"out/\".\n only_top3_results (bool, optional): Defines whether only the top3 tags or all should be in the results. Defaults to True.\n only_tagged_results (bool, optional): Defines whether results should only contain tags (True), or also scores (False). Defaults to False.\n \"\"\" \n \n # --- PREPARING FILENAMES ---\n\n # Get filenames of directory\n file_pattern = input_directory + \"*.txt\"\n filenames = glob.glob(file_pattern)\n\n # If no files found:\n if len(filenames) == 0:\n sys.exit(f\"[ERROR] No files matching {file_pattern} found, check input_directory path or file placement.\")\n else:\n print(f\"[INFO] Found {len(filenames)} files, starting tagging...\")\n\n # --- TAGGING ALL FILES AND SAVING OUTPUTS ---\n\n # Start timer \n start = time.time()\n\n # Loop tagger over files\n for filename in tqdm(filenames):\n output = self.tag_text(input=filename, input_file=True, only_top3_results=only_top3_results, only_tagged_results=only_tagged_results)\n output_directory_path = os.path.join(self.base_path, \"..\", output_directory)\n output_path = output_directory_path + (\"only_\" if only_tagged_results == True else \"scores_\") + \"tagged_\" + filename.split(\"/\")[1].replace(\".txt\", \".csv\")\n output.to_csv(output_path, index=False, sep=\"\\t\", encoding=\"utf-8\")\n \n # Print done and results\n print(f\"[INFO] ...done! 
Results saved in {output_path}\")\n\n # Print timings\n print(f\"[INFO] {len(filenames)} files tagged in {round(time.time()-start, 2)} seconds!\")\n print(\"\\n ================== \\n\")\n\n def get_context(self, input_list:list, target_idx:int):\n \"\"\"Helper function to get 5 entries before and after a target in list.\n\n Args:\n input_list (list): List of elements, from which to extract context of a target.\n target_idx (int): Index of the target\n\n Returns:\n context (list): List of context entries from list\n \"\"\"\n\n # For the first 5 words\n if target_idx <= 5:\n pre_target = input_list[:target_idx]\n post_target = input_list[target_idx+1:target_idx+6]\n\n # Last 5 words\n elif target_idx >= len(input_list)-5:\n pre_target = input_list[target_idx-5:target_idx]\n post_target = input_list[target_idx:]\n\n # All other words\n else: \n pre_target = input_list[target_idx-5:target_idx]\n post_target = input_list[target_idx+1:target_idx+6]\n\n # Join context before and after\n context = pre_target + post_target\n\n return context\n \n def disambiguate_duplicates(self, token_tagged_d:dict, duplicate_tags_scores:list[tuple], tags_context:list[str], filepath:str):\n \"\"\"Helper function to disambiguate between tags with duplicate scores.\n\n Args:\n token_tagged_d (dict): dictionary of target token, for which scores are duplicate (only for info file).\n duplicate_tags_scores (list[tuple]): list of tuples (tag, score) of tags with duplicate scores.\n tags_context (list[str]): list of str for the tags from the context.\n filepath (str): filepath to save disambiguation info, if None nothing is saved.\n\n Returns:\n duplicates_disambiguated (list[tuple]): list of tuples disambiguated by necessary sorting algorithm\n \"\"\"\n \n # --- HIGH LEVEL DISAMBIGUATION ---\n\n # Get high level tags of the context\n top_tags_context = [tag.split(\"|\")[0] + \"|\" for tag in tags_context]\n\n # Count how many context words have each of the possible tags\n top_tags_counts = [(tag, top_tags_context.count(tag[0].split(\"|\")[0] + \"|\")) for tag in duplicate_tags_scores]\n top_counts = [tag_count[1] for tag_count in top_tags_counts]\n \n # If that was successful in disambiguating\n if len(top_counts) == len(set(top_counts)): \n disambiguation_method = \"HIGH-LEVEL\"\n top_tags_counts_ordered = sorted(top_tags_counts, key=lambda x: x[1], reverse=True)\n duplicates_disambiguated = [tag[0] for tag in top_tags_counts_ordered]\n\n # --- LOW LEVEL DISAMBIGUATION ---\n \n else: \n # Count how many context words have each of the possible tags\n sub_tags_counts = [(tag, tags_context.count(tag[0])) for tag in duplicate_tags_scores]\n sub_counts = [tag_count[1] for tag_count in sub_tags_counts]\n \n # If that was successful in disambiguating\n if len(sub_counts) == len(set(sub_counts)):\n disambiguation_method = \"LOW-LEVEL\"\n sub_tags_counts_ordered = sorted(sub_tags_counts, key=lambda x: x[1], reverse=True)\n duplicates_disambiguated = [tag[0] for tag in sub_tags_counts_ordered]\n\n # --- CATEGORY SIZE DISAMBIGUATION ---\n\n else:\n # Return sorted by size of low level category\n disambiguation_method = \"CATEGORY-SIZE\"\n duplicates_disambiguated = sorted(duplicate_tags_scores, key=lambda x: len(self.DDB_dict[x[0]]), reverse=True)\n\n # --- SAVE INFO ---\n if filepath is not None:\n f = open(filepath, \"a\")\n f.write(\"\\n----------------------------------------------\\n\")\n f.write(f\"TARGET INFO: {token_tagged_d['ORIGINAL_IDX'], token_tagged_d['TOKEN']}\\n\")\n f.write(f\"TARGET ALL TAGS: 
{token_tagged_d['DDB_TAGS']}\\n\")\n f.write(f\"TARGET DUPLICATE TAGS: {duplicate_tags_scores}\\n\")\n f.write(f\"TAGS CONTEXT: {tags_context}\\n\")\n f.write(f\"TOP LEVEL TAG COUNTS: {top_tags_counts}\\n\")\n if disambiguation_method == \"LOW-LEVEL\" or disambiguation_method == \"CATEGORY-SIZE\":\n f.write(f\"SUB LEVEL TAGS COUNTS: {sub_tags_counts}\\n\")\n if disambiguation_method == \"CATEGORY-SIZE\":\n f.write(f\"TAG SIZE DISAMBIGUATION: {[(x, len(self.DDB_dict[x[0]])) for x in duplicate_tags_scores]}\\n\")\n\n return duplicates_disambiguated\n\n\n# --- RUN TAGGER FOR FILES IN DIRECTORY ---\n\nif __name__ == '__main__':\n\n # --- REQUIREMENT: PYTHON >= 3.9 ----\n\n if sys.version_info[:2] < (3,9):\n sys.exit(\"[ERROR] Oops! You need Python 3.9+ (the built-in generic type hints used here require it)!\")\n print(\"\\n ================== \\n\")\n\n # --- ARGUMENT PARSER ---\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('--input_directory', type=str, required=False,\n default=\"in/\",\n help='Input directory to files to tag, will tag all files in directory.')\n\n parser.add_argument('--dict', type=str, required=False,\n default=\"dict/dict.pkl\",\n help=\"Path to semantic dictionary.\")\n\n parser.add_argument('--da_model', type=str, required=False,\n default=\"spacy\",\n help=\"Danish language model to use, 'spacy' or 'dacy'.\")\n \n # a store_false flag, so that the default (top3 only) can actually be switched off\n parser.add_argument('--all_results', dest='only_top3_results', required=False,\n action=\"store_false\", default=True,\n help=\"Use argument if results should contain all possible tags, by default only contains the top3.\")\n\n parser.add_argument('--only_tagged_results', required=False,\n action=\"store_true\", default=False,\n help=\"Use argument if results should only contain tags, by default also contain scores.\")\n\n parser.add_argument('--output_directory', type=str, required=False,\n default=\"out/\",\n help=\"Directory to save output files.\")\n\n args = parser.parse_args()\n \n # -- RUN TAGGER FOR DIRECTORY ---\n\n # Loading tagger (add error message if da_model is not loaded?)\n Tagger = DDB_tagger(dict=args.dict, da_model=args.da_model)\n print(f\"[INFO] DDB Tagger with {args.da_model} loaded, now processing files...\")\n\n # Run tagger for directory \n Tagger.tag_directory(input_directory=args.input_directory, \n output_directory=args.output_directory, \n only_top3_results=args.only_top3_results,\n only_tagged_results=args.only_tagged_results)\n\n " ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
blankspeedster/module-hub
[ "9b28b2dda9009bc1800a0d6e0d8c1447e70fd48a" ]
[ "qr_decoder/dispense_scanner.py" ]
[ "import mysql.connector\nimport cv2\nimport numpy as np\nfrom pyzbar.pyzbar import decode\nfrom time import sleep\nimport json\nimport RPi.GPIO as GPIO\n\ncurrentID = 0 \n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(23, GPIO.OUT)\nGPIO.output(23, GPIO.LOW)\n\nmydb = mysql.connector.connect(\n host=\"192.168.100.22\",\n user=\"modulehub_database\",\n passwd=\"modulehub_database\",\n database=\"modulehub_database\"\n )\nmycursor = mydb.cursor(dictionary=True)\n\nprint('Scan QR to start')\n\ndef decoder(image):\n gray_img = cv2.cvtColor(image,0)\n barcode = decode(gray_img)\n\n for obj in barcode:\n print('Please Wait... ')\n sleep(2)\n points = obj.polygon\n (x,y,w,h) = obj.rect\n pts = np.array(points, np.int32)\n pts = pts.reshape((-1, 1, 2))\n cv2.polylines(image, [pts], True, (0, 255, 0), 3)\n\n barcodeData = obj.data.decode(\"utf-8\")\n barcodeType = obj.type\n barcodeData = json.loads(barcodeData)\n\n if \"user\" in barcodeData.keys():\n print(\"user exists!\")\n global currentID\n currentID = barcodeData[\"user\"]\n mycursor.execute(f'''SELECT * FROM users WHERE id = {currentID}''')\n result = mycursor.fetchone()\n if result is None:\n print(\"No user found. Please try again.\")\n else:\n print(str.capitalize(result[\"firstname\"])+\" \"+str.capitalize(result[\"lastname\"]))\n mycursor.execute(f''' SELECT * FROM users u\n JOIN module m\n ON m.user_id = u.id\n WHERE m.returned = 0 AND u.id = {currentID}\n LIMIT 1 ''')\n subjects = mycursor.fetchall()\n if not subjects:\n print(\"Student has received the modules already.\")\n else:\n updateStatus = f'''UPDATE module SET returned = '-1' WHERE user_id = {currentID} '''\n mycursor.execute(updateStatus)\n mydb.commit()\n \n GPIO.output(23, GPIO.HIGH)\n sleep(5)\n GPIO.output(23, GPIO.LOW)\n\n print(\"Modules has been dispensed. Please tap another QR code for dispensing.\")\n else:\n print(\"Invalid QR code\") \n\n # string = \"Data \" + str(barcodeData) + \" | Type \" + str(barcodeType)\n # newString = str(barcodeData[\"user\"])\n # cv2.putText(frame, newString, (x,y), cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,0,0), 2)\n # cv2.putText(frame, string, (x,y), cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,0,0), 2)\n\n # print(\"Barcode: \"+barcodeData +\" | Type: \"+barcodeType)\n\ncap = cv2.VideoCapture(0)\nwhile True:\n ret, frame = cap.read()\n decoder(frame)\n cv2.imshow('Image', frame)\n code = cv2.waitKey(10)\n if code == ord('q'):\n break" ]
[ [ "numpy.array" ] ]
jinfagang/torch2trt_dynamic
[ "fad7a7845f13cb59c05de25fcb83e7591acb492c" ]
[ "torch2trt_dynamic/converters/relu.py" ]
[ "import torch\nfrom torch2trt_dynamic.torch2trt_dynamic import tensorrt_converter\n\nfrom .ReLU import convert_ReLU\n\n\n@tensorrt_converter('torch.relu')\n@tensorrt_converter('torch.relu_')\n@tensorrt_converter('torch.nn.functional.relu')\n@tensorrt_converter('torch.nn.functional.relu_')\ndef convert_relu(ctx):\n ctx.method_args = (torch.nn.ReLU(), ) + ctx.method_args\n convert_ReLU(ctx)\n" ]
[ [ "torch.nn.ReLU" ] ]
kanekosh/pygeo
[ "5bd69ee47d9483d851bd2bfdc2a6135c94ad0343" ]
[ "tests/reg_tests/commonUtils.py" ]
[ "import os\nimport numpy as np\nfrom pygeo import DVGeometry, DVGeometryAxi\nfrom pyspline import Curve\n\n\n##################\n# DVGeometry Tests\n##################\n\n\ndef setupDVGeo(base_path, rotType=None):\n # create the Parent FFD\n FFDFile = os.path.join(base_path, \"../inputFiles/outerBoxFFD.xyz\")\n DVGeo = DVGeometry(FFDFile)\n\n # create a reference axis for the parent\n axisPoints = [[-1.0, 0.0, 0.0], [1.5, 0.0, 0.0]]\n c1 = Curve(X=axisPoints, k=2)\n if rotType is not None:\n DVGeo.addRefAxis(\"mainAxis\", curve=c1, axis=\"y\", rotType=rotType)\n\n else:\n DVGeo.addRefAxis(\"mainAxis\", curve=c1, axis=\"y\")\n\n # create the child FFD\n FFDFile = os.path.join(base_path, \"../inputFiles/simpleInnerFFD.xyz\")\n DVGeoChild = DVGeometry(FFDFile, child=True)\n\n # create a reference axis for the child\n axisPoints = [[-0.5, 0.0, 0.0], [0.5, 0.0, 0.0]]\n c1 = Curve(X=axisPoints, k=2)\n DVGeoChild.addRefAxis(\"nestedAxis\", curve=c1, axis=\"y\")\n\n return DVGeo, DVGeoChild\n\n\ndef setupDVGeoD8(base_path, isComplex):\n # create the Parent FFD\n FFDFile = os.path.join(base_path, \"../inputFiles/bodyFFD.xyz\")\n DVGeo = DVGeometry(FFDFile, complex=isComplex)\n\n # create a reference axis for the parent\n axisPoints = [[0.0, 0.0, 0.0], [26.0, 0.0, 0.0], [30.5, 0.0, 0.9], [32.5, 0.0, 1.01], [34.0, 0.0, 0.95]]\n c1 = Curve(X=axisPoints, k=2)\n DVGeo.addRefAxis(\"mainAxis\", curve=c1, axis=\"y\")\n\n # create the child FFD\n FFDFile = os.path.join(base_path, \"../inputFiles/nozzleFFD.xyz\")\n DVGeoChild = DVGeometry(FFDFile, child=True, complex=isComplex)\n\n # create a reference axis for the child\n axisPoints = [[32.4, 1.0, 1.0], [34, 1.0, 0.9]]\n c1 = Curve(X=axisPoints, k=2)\n DVGeoChild.addRefAxis(\"nestedAxis\", curve=c1, axis=\"y\")\n\n return DVGeo, DVGeoChild\n\n\ndef setupDVGeoAxi(base_path):\n FFDFile = os.path.join(base_path, \"../inputFiles/axiTestFFD.xyz\")\n DVGeo = DVGeometryAxi(FFDFile, center=(0.0, 0.0, 0.0), collapse_into=(\"x\", \"z\"))\n axisPoints = [[0, 0.0, 0.0], [0, 0.0, 1.0]]\n c1 = Curve(X=axisPoints, k=2)\n DVGeo.addRefAxis(\"stretch\", curve=c1, axis=\"z\")\n\n return DVGeo\n\n\n# define a nested global design variable\ndef childAxisPoints(val, geo):\n C = geo.extractCoef(\"nestedAxis\")\n\n # Set the coefficients\n C[0, 0] = val[0]\n\n geo.restoreCoef(C, \"nestedAxis\")\n\n return\n\n\n# define a nested global design variable\ndef mainAxisPoints(val, geo):\n C = geo.extractCoef(\"mainAxis\")\n\n # Set the coefficients\n C[0, 0] = val[0]\n\n geo.restoreCoef(C, \"mainAxis\")\n\n return\n\n\n# define a nested global design variable\ndef childAxisPointsD8(val, geo):\n C = geo.extractCoef(\"nestedAxis\")\n\n # Set the coefficients\n for i in range(len(val)):\n C[i, 0] = val[i]\n\n geo.restoreCoef(C, \"nestedAxis\")\n\n return\n\n\n# define a nested global design variable\ndef mainAxisPointsD8(val, geo):\n C = geo.extractCoef(\"mainAxis\")\n\n # Set the coefficients\n for i in range(len(val)):\n C[i, 0] = val[i]\n\n geo.restoreCoef(C, \"mainAxis\")\n\n return\n\n\ndef mainAxisPointAxi(val, DVgeo):\n C = DVgeo.extractCoef(\"stretch\")\n C[0, 2] = val[0]\n\n DVgeo.restoreCoef(C, \"stretch\")\n return\n\n\ndef totalSensitivityFD(DVGeo, nPt, ptName, step=1e-1):\n xDV = DVGeo.getValues()\n refPoints = DVGeo.update(ptName)\n # now get FD Sensitivity\n dIdxFD = {}\n # step = 1e-1#8\n for key in xDV:\n baseVar = xDV[key].copy()\n nDV = len(baseVar)\n dIdxFD[key] = np.zeros([nPt, nDV])\n for i in range(nDV):\n # print('perturbing',key)\n xDV[key][i] = baseVar[i] + 
step\n # print('setting design vars')\n DVGeo.setDesignVars(xDV)\n # print('calling top level update')\n newPoints = DVGeo.update(ptName)\n\n deriv = (newPoints - refPoints) / step\n dIdxFD[key][:, i] = deriv.flatten()\n # print('Deriv',key, i,deriv)\n xDV[key][i] = baseVar[i]\n\n return dIdxFD\n\n\ndef totalSensitivityCS(DVGeo, nPt, ptName):\n xDV = DVGeo.getValues()\n\n # now get CS Sensitivity\n dIdxCS = {}\n step = 1e-40j\n for key in xDV:\n baseVar = xDV[key].copy()\n dIdxCS[key] = np.zeros([nPt, len(baseVar)])\n for i in range(len(baseVar)):\n xDV[key][i] = baseVar[i] + step\n\n DVGeo.setDesignVars(xDV)\n newPoints = DVGeo.update(ptName)\n\n deriv = np.imag(newPoints) / np.imag(step)\n dIdxCS[key][:, i] = deriv.flatten()\n # print 'Deriv',key, i,deriv\n xDV[key][i] = baseVar[i]\n\n # Before we exit make sure we have reset the DVs\n DVGeo.setDesignVars(xDV)\n\n return dIdxCS\n\n\ndef testSensitivities(DVGeo, refDeriv, handler, pointset=1):\n # create test points\n points = np.zeros([2, 3])\n if pointset == 1:\n points[0, :] = [0.25, 0, 0]\n points[1, :] = [-0.25, 0, 0]\n elif pointset == 2:\n points[0, :] = [0.25, 0.4, 4]\n points[1, :] = [-0.8, 0.2, 7]\n else:\n raise Warning(\"Enter a valid pointset\")\n\n # add points to the geometry object\n ptName = \"testPoints\"\n DVGeo.addPointSet(points, ptName)\n\n # generate dIdPt\n nPt = 6\n dIdPt = np.zeros([nPt, 2, 3])\n dIdPt[0, 0, 0] = 1.0\n dIdPt[1, 0, 1] = 1.0\n dIdPt[2, 0, 2] = 1.0\n dIdPt[3, 1, 0] = 1.0\n dIdPt[4, 1, 1] = 1.0\n dIdPt[5, 1, 2] = 1.0\n # get analytic sensitivity\n if refDeriv:\n dIdx = totalSensitivityFD(DVGeo, nPt, ptName)\n else:\n dIdx = DVGeo.totalSensitivity(dIdPt, ptName)\n\n handler.root_add_dict(\"dIdx\", dIdx, rtol=1e-7, atol=1e-7)\n\n\ndef testSensitivitiesD8(DVGeo, refDeriv, handler):\n # create test points\n nPoints = 50\n points = np.zeros([nPoints, 3])\n for i in range(nPoints):\n nose = 0.01\n tail = 34.0\n delta = (tail - nose) / nPoints\n points[i, :] = [nose + i * delta, 1.0, 0.5]\n\n # print('points',points)\n\n # add points to the geometry object\n ptName = \"testPoints\"\n DVGeo.addPointSet(points, ptName, faceFreeze={})\n\n # generate dIdPt\n nPt = nPoints * 3\n dIdPt = np.zeros([nPt, nPoints, 3])\n counter = 0\n for i in range(nPoints):\n for j in range(3):\n dIdPt[counter, i, j] = 1.0\n counter += 1\n # get analytic sensitivity\n if refDeriv:\n # dIdx = totalSensitivityFD(DVGeo,nPt,ptName)\n dIdx = totalSensitivityCS(DVGeo, nPt, ptName)\n else:\n dIdx = DVGeo.totalSensitivity(dIdPt, ptName)\n\n handler.root_add_dict(\"dIdx\", dIdx, rtol=1e-7, atol=1e-7)\n\n\n# --- Adding standard twist and single axis scaling functions ---\n# These functions are added for Test 24 but could be extended to other tests\n\nfix_root_sect = 1\nnRefAxPts = 4\n\n\ndef twist(val, geo):\n axis_key = list(geo.axis.keys())[0]\n for i in range(fix_root_sect, nRefAxPts):\n geo.rot_theta[axis_key].coef[i] = val[i - fix_root_sect]\n\n\ndef thickness(val, geo):\n axis_key = list(geo.axis.keys())[0]\n\n for i in range(1, nRefAxPts):\n geo.scale_z[axis_key].coef[i] = val[i - fix_root_sect]\n\n\ndef chord(val, geo):\n axis_key = list(geo.axis.keys())[0]\n\n for i in range(1, nRefAxPts):\n geo.scale_x[axis_key].coef[i] = val[i - fix_root_sect]\n" ]
[ [ "numpy.imag", "numpy.zeros" ] ]
Javran/misc
[ "736d657fead5143c26f3e3b9b85039bf8185e824" ]
[ "auto-tents/py/solver.py" ]
[ "#!/usr/bin/env python3.7\n\nimport json\nimport os\nimport random\nimport subprocess\nimport tempfile\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom experiment import preset_path, RE_RAW_SIZE, find_board_size, \\\n extract_digits, color_shade, load_samples, crop_digit_cell, \\\n find_tag, RECOG_THRESHOLD\n\n\ndef main_recognize_and_solve_board():\n # compiled binary of https://github.com/Javran/puzzle-solving-collection/tree/master/tents-solver\n tents_demo_bin = os.environ['TENTS_DEMO_BIN']\n print(f'tents-demo: {tents_demo_bin}')\n d = None\n with open(preset_path) as f:\n d = json.load(f)['1440x2880']\n assert d is not None\n\n def to_side_length_set(bounds):\n return { x[1] - x[0] + 1 for x in bounds }\n\n # Build reverse map from side length of a blank cell to size (# of cells in row or col)\n side_length_to_size = {}\n for size_raw, v in d.items():\n size = int(RE_RAW_SIZE.match(size_raw).group(1))\n row_bounds = to_side_length_set(v['row_bounds'])\n col_bounds = to_side_length_set(v['col_bounds'])\n all_bounds = set.union(row_bounds, col_bounds)\n for x in all_bounds:\n assert x not in side_length_to_size, 'Side length is ambiguous.'\n side_length_to_size[x] = size\n\n # TODO: this is just quick and dirty and contains tons of duplicated codes.\n fp_img = tempfile.NamedTemporaryFile(delete=False,suffix='.png')\n subprocess.run(['adb', 'exec-out', 'screencap', '-p'], stdout=fp_img)\n fp_img.close()\n img = cv2.imread(fp_img.name)\n os.remove(fp_img.name)\n size = find_board_size(side_length_to_size, img)\n assert size is not None, 'Size cannot be recognized.'\n print(f'Board size: {size}x{size}')\n cell_bounds_raw = d[f'{size}x{size}']\n row_bounds = list(map(lambda x: (x[0], x[1]), cell_bounds_raw['row_bounds']))\n col_bounds = list(map(lambda x: (x[0], x[1]), cell_bounds_raw['col_bounds']))\n row_digits, col_digits = extract_digits(img, (row_bounds, col_bounds))\n digits = np.concatenate(\n [\n np.concatenate(row_digits, axis=1),\n np.concatenate(col_digits, axis=1),\n ])\n\n cells = [ [ None for _ in range(size) ] for _ in range(size)]\n for r, (row_lo, row_hi) in enumerate(row_bounds):\n for c, (col_lo, col_hi) in enumerate(col_bounds):\n cells[r][c] = img[row_lo:row_hi+1, col_lo:col_hi+1]\n recombined = np.concatenate([ np.concatenate(row, axis=1) for row in cells ], axis=0)\n\n output_board = [ [ None for _ in range(size) ] for _ in range(size)]\n def find_tree(cell_img,r,c):\n result = cv2.inRange(cell_img, color_shade, color_shade)\n (_,_,w,h) = cv2.boundingRect(result)\n if w != 0 and h != 0:\n color = 0xFF\n output_board[r][c] = 'R'\n else:\n color = 0\n output_board[r][c] = '?'\n return np.full((4,4), color)\n\n cell_results_recombined = np.concatenate([\n np.concatenate([ find_tree(cell,r,c) for c, cell in enumerate(row)], axis=1) for r, row in enumerate(cells)\n ], axis=0)\n\n tagged_samples = load_samples()\n recog_row_digits = [ None for _ in range(size) ]\n recog_col_digits = [ None for _ in range(size) ]\n\n for desc, ds, ds_out in [\n ('Row', row_digits, recog_row_digits),\n ('Col', col_digits, recog_col_digits),\n ]:\n # print(f'{desc} info:')\n for i, digit_img in enumerate(ds):\n digit_img_cropped = crop_digit_cell(digit_img)\n if digit_img_cropped is None:\n ds_out[i] = '0'\n # print('-')\n continue\n # use original image for this step as we want some room around\n # the sample to allow some flexibility.\n best_val, best_tag = find_tag(tagged_samples, digit_img)\n if best_val < RECOG_THRESHOLD:\n print(f'Warning: best_val is only {best_val}, the 
recognized digit might be incorrect.')\n\n ds_out[i] = best_tag\n\n # TODO: turn this into UNTAGGED if best_val is too low,\n # we can also do \"UNTAGGED_<x>_<whatever id>.png\"\n # where \"<x>\" is the best tag we have.\n # this makes it easier to rename if the best guess is actually correct.\n # print(best_tag, best_val)\n input_lines = []\n def out(line):\n input_lines.append(line)\n\n out(f'{size} {size}')\n for i, line in enumerate(output_board):\n out(''.join(line) + f' {recog_row_digits[i]}')\n out(' '.join(recog_col_digits))\n print('# PUZZLE OUTPUT BEGIN')\n for l in input_lines:\n print(l)\n print('# PUZZLE OUTPUT END')\n plot = False\n if plot:\n # debug-only plotting; assumes pyplot comes from matplotlib and that\n # subplot_color / subplot_gray live in experiment like the helpers above\n from matplotlib import pyplot\n from experiment import subplot_color, subplot_gray\n pyplot.figure().canvas.set_window_title('@dev')\n subplot_color(221, img, 'origin')\n subplot_color(222, recombined, 'extracted')\n subplot_color(223, digits, 'digits')\n subplot_gray(224, cell_results_recombined, 'find tree')\n pyplot.show()\n proc_result = subprocess.run(\n [tents_demo_bin, 'stdin'],\n input='\\n'.join(input_lines) + '\\n',\n text=True,\n capture_output=True,\n )\n raw_tent_positions = proc_result.stdout.strip().split('|')\n def parse_raw(raw):\n [a,b] = raw.split(',')\n return int(a), int(b)\n tent_positions = list(map(parse_raw, raw_tent_positions))\n print(f'Received {len(tent_positions)} tent positions.')\n procs = []\n def tap(r,c):\n row_lo, row_hi = row_bounds[r]\n row_pos = round((row_lo + row_hi) / 2)\n col_lo, col_hi = col_bounds[c]\n col_pos = round((col_lo + col_hi) / 2)\n procs.append(subprocess.Popen(['adb', 'exec-out', 'input', 'tap', str(col_pos), str(row_pos)]))\n\n\n solving_moves = [ d for pos in tent_positions for d in [pos, pos] ]\n random.shuffle(solving_moves)\n for (r,c) in solving_moves:\n tap(r,c)\n time.sleep(0.2)\n\n for p in procs:\n p.wait()\n\nif __name__ == '__main__':\n main_recognize_and_solve_board()\n" ]
[ [ "numpy.concatenate", "numpy.full" ] ]
oldChen3/Face-Bullets-Screen-System
[ "820ede8bc2ec6daf9539322fcff5b30ee7713bb5" ]
[ "face_recognize_positon.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 12 13:28:18 2020\n\n@author: 老陈三\n\"\"\"\n\nimport cv2\nfrom dlib import get_frontal_face_detector\nfrom os import rename, remove\nfrom PIL import ImageFont, ImageDraw, Image\nfrom face_recognition import face_locations\nimport numpy as np\n\n\nclass face_recog_pos():\n def update_face_pos(self, video_path, pos_file_path):\n pos_fd = open(pos_file_path, 'w')\n vs = cv2.VideoCapture(video_path)\n frame_no = 0\n #detector = get_frontal_face_detector()\n while True:\n frame = vs.read()\n frame = frame[1]\n if frame is None:\n break\n #ret = self.get_face_pos_in_image(detector,frame)\n ret = self.get_face_pos_v2(frame)\n pos_fd.write(str(frame_no)+'-'+str(len(ret)))\n for val in ret:\n pos_fd.write('-'+str(val))\n pos_fd.write('- \\n')\n frame_no += 1\n vs.release()\n cv2.destroyAllWindows()\n pos_fd.close()\n \n def update_all_bullets_screen(self,video_path,bullet_file_path,text):\n bullet_fd = open(bullet_file_path, 'w')\n vs = cv2.VideoCapture(video_path)\n #frame_no = 0\n while True:\n frame = vs.read()\n frame = frame[1]\n if frame is None:\n break\n bullet_fd.write(text+'\\n')\n bullet_fd.close()\n \n def show_video_with_bullets(self, video_path, pos_file_path, bullets_path, bullets_path_backup):\n pos_fd = open(pos_file_path, 'r')\n bullets_fd = open(bullets_path, 'r+')\n bullets_backup_fd = open(bullets_path_backup, 'w')\n vs = cv2.VideoCapture(video_path)\n frame_no = 0\n text_file = ' '\n text_user = ' '\n text = ''\n while True:\n frame = vs.read()\n frame = frame[1]\n if frame is None:\n break\n (h, w) = frame.shape[:2]\n width=1000\n r = width / float(w)\n dim = (width, int(h * r))\n img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n ret = self.read_face_pos(frame_no, pos_fd)\n text_file = self.read_bullet_screen(bullets_fd)\n text_file = text_file.strip('\\n')\n if text_user == ' ':\n text = text_file\n else:\n text = text_user\n if cv2.waitKey(30) == ord('s'):\n text_user = self.input_bullet_text_eng()\n text = text_user\n #self.write_bullet_screen(bullets_fd,text)\n print(text)\n self.write_bullet_screen(bullets_backup_fd,text)\n for p in ret['Position']:\n cv2.rectangle(img,(p[0],p[1]),(p[2],p[3]),(100,0,0),1)\n self.show_bullet_on_img(img,(p[2],p[3]),text)\n cv2.imshow(\"face detect\", img)\n frame_no += 1\n pos_fd.close()\n bullets_fd.close()\n bullets_backup_fd.close()\n remove(bullets_path)\n rename(bullets_path_backup,bullets_path)\n \n cv2.destroyAllWindows()\n print('frame_no = ' + str(frame_no))\n \n def read_face_pos(self,frame_no,pos_fd):\n ret = {'Frame_no':0,'lens':0, 'Position':[]}\n contexts = contexts = pos_fd.readline()\n if contexts != \" \":\n contexts = contexts.strip('\\n')\n contexts = contexts.strip(' ')\n contexts = contexts.replace('--','-')\n res_str = contexts.split('-')\n ret['Frame_no'] = int(res_str[0])\n ret['lens'] = int(res_str[1])\n i = 0\n while i < ret['lens']:\n #print(\"ret['Frame_no'] = \" + res_str[0])\n ret['Position'].append([int(res_str[i+2]),int(res_str[i+3]), \n int(res_str[i+4]),int(res_str[i+5])])\n i = i + 4\n return ret\n \n def get_face_pos_v2(self,frame):\n ret = []\n (h, w) = frame.shape[:2]\n width=1000\n r = width / float(w)\n dim = (width, int(h * r))\n img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n rgb_frame = img[:, :, ::-1]\n find_face_locations = face_locations(rgb_frame)\n draw_img = img.copy()\n \n for face_location in find_face_locations:\n top, right, bottom, left = face_location\n ret += [left,top, right, bottom]\n 
cv2.rectangle(draw_img,(left,top),(right,bottom),(255,0,0),2)\n cv2.imshow(\"face detect\", draw_img)\n cv2.waitKey(1)\n return ret\n \n\n def get_face_pos_in_image(self,detector,img):\n (h, w) = img.shape[:2]\n width=1000\n r = width / float(w)\n dim = (width, int(h * r))\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n draw_img = img.copy()\n cv2.imshow(\"Image\", img)\n \n # Face detection\n ret = []\n rects = detector(gray, 1)\n for (i, rect) in enumerate(rects):\n ret += [rect.left(),rect.top(),rect.right(),rect.bottom()]\n cv2.rectangle(draw_img,(rect.left(),rect.top()),(rect.right(),rect.bottom()),(255,0,0),2)\n cv2.imshow(\"face detect\", draw_img)\n cv2.waitKey(1)\n return ret\n \n def show_bullet_on_img(self,img, pos, text):\n \"\"\"\n draw_img = ImageDraw.Draw(img)\n draw_img.text(pos,text,(255,255,0))\n \"\"\"\n cv2.putText(img, text,pos,cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,255,0), 1, cv2.LINE_AA)\n \n def show_utf8_bullet_on_img(self, img, pos, text):\n img_pillow = Image.fromarray(img)\n draw_img = ImageDraw.Draw(img_pillow)\n draw_img.text(pos,text,fill=(0,120,0,0))\n img = np.array(img_pillow)\n return img\n \n def input_bullet_text_eng(self):\n text = input('input bullet : \\n')\n return text\n \n def write_bullet_screen(self,fd,text):\n fd.write(text+'\\n')\n \n def read_bullet_screen(self, fd):\n text = fd.readline()\n return text\n " ]
[ [ "numpy.array" ] ]
DagerD/RadeonProRenderBlenderAddon
[ "188756291a0662f85b91b61aec276794785d75bd" ]
[ "src/bindings/pyrpr/src/pyrpr.py" ]
[ "#**********************************************************************\n# Copyright 2020 Advanced Micro Devices, Inc\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#********************************************************************\nimport platform\nimport traceback\nimport inspect\nimport ctypes\nimport time\nimport functools\nimport sys\nimport numpy as np\nfrom typing import List\n\nimport bgl\n\nimport pyrprwrap\nfrom pyrprwrap import *\n\n\nclass CoreError(Exception):\n def __init__(self, status, func_name, argv, module_name):\n super().__init__()\n self.status = status\n self.func_name = func_name\n self.argv = argv\n self.module_name = module_name\n\n for name in pyrprwrap._constants_names:\n value = getattr(pyrprwrap, name)\n if name.startswith('ERROR_') and status == value:\n status = \"%s<%d>\" % (name, value)\n break\n \n self.error_message = self.get_last_error_message()\n\n def __str__(self):\n return \"%s call %s(%s) returned error code <%s> with error message: '%s'\" % \\\n (self.module_name, self.func_name, ', '.join(str(a) for a in self.argv), self.status, self.error_message)\n\n @staticmethod\n def get_last_error_message():\n ffi = pyrprwrap.ffi\n lib = pyrprwrap.lib\n sizeParamPtr = ffi.new('size_t *', 0)\n\n # bypass calling ContextGetInfo through wrappers, that's why calling it directly to the lib\n state = lib.rprContextGetInfo(ffi.NULL, CONTEXT_LAST_ERROR_MESSAGE, 0, ffi.NULL, sizeParamPtr)\n sizeParam = sizeParamPtr[0]\n if state == SUCCESS and sizeParam >= 1:\n strData = ffi.new('char[%d]' % sizeParam)\n state = lib.rprContextGetInfo(ffi.NULL, CONTEXT_LAST_ERROR_MESSAGE, sizeParam, strData, ffi.NULL)\n if state == SUCCESS:\n return ffi.string(strData)\n\n return \"\"\n\n\ndef wrap_core_check_success(f, module_name):\n @functools.wraps(f)\n def wrapped(*argv):\n status = f(*argv)\n if SUCCESS != status:\n raise CoreError(status, f.__name__, argv, module_name)\n return status\n return wrapped\n\n\ndef wrap_core_log_call(f, log_fun, module_name):\n signature = inspect.signature(f)\n\n @functools.wraps(f)\n def wrapped(*argv):\n log_fun(module_name+'::'+f.__name__, ', '.join(p.name+': '+str(value) for p, value in zip(signature.parameters.values(), argv)))\n time_begin = time.perf_counter()\n result = f(*argv)\n time_end = time.perf_counter()\n log_fun(module_name+'::'+f.__name__, \"done in \", time_end-time_begin)\n return result\n return wrapped\n\n\nclass _init_data:\n log_fun = None\n lib_wrapped_log_calls = False\n\n\ndef init(lib_dir, log_fun, lib_wrapped_log_calls):\n _init_data.log_fun = log_fun\n _init_data.lib_wrapped_log_calls = lib_wrapped_log_calls\n\n lib_name = {\n 'Windows': \"RadeonProRender64.dll\",\n 'Linux': \"libRadeonProRender64.so\",\n 'Darwin': \"libRadeonProRender64.dylib\"\n }[platform.system()]\n\n ctypes.CDLL(str(lib_dir / lib_name))\n\n import __rpr\n try:\n lib = __rpr.lib\n except AttributeError:\n lib = __rpr.ffi.dlopen(str(lib_dir / lib_name))\n pyrprwrap.lib = lib\n pyrprwrap.ffi = __rpr.ffi\n global ffi\n ffi = __rpr.ffi\n\n 
_module = __import__(__name__)\n\n for name in pyrprwrap._constants_names:\n setattr(_module, name, getattr(pyrprwrap, name))\n \n for name in pyrprwrap._functions_names:\n \n wrapped = getattr(pyrprwrap, name)\n # wrap all functions here(for more flexilibity) to log call, if enabled\n # and to assert that SUCCESS is returned from them\n if lib_wrapped_log_calls:\n wrapped = wrap_core_log_call(wrapped, log_fun, 'RPR')\n if wrapped.__name__ != 'RegisterPlugin':\n wrapped = wrap_core_check_success(wrapped, 'RPR')\n setattr(_module, name, wrapped)\n\n del _module\n\n\ndef encode(string):\n return string.encode('utf8')\n\n\ndef decode(bin_str):\n return bin_str.decode('utf8')\n\n\ndef is_gpu_enabled(creation_flags):\n for i in range(16):\n if getattr(pyrprwrap, 'CREATION_FLAGS_ENABLE_GPU%d' % i) & creation_flags:\n return True\n\n return False\n\n\ndef get_first_gpu_id_used(creation_flags):\n for i in range(16):\n if getattr(pyrprwrap, 'CREATION_FLAGS_ENABLE_GPU%d' % i) & creation_flags:\n return i\n\n raise IndexError(\"GPU is not used\", creation_flags)\n\n\nclass Object:\n core_type_name = 'void*'\n\n def __init__(self):\n self._handle_ptr = ffi.new(self.core_type_name + '*', ffi.NULL)\n self.name = None\n\n def __del__(self):\n try:\n self.delete()\n except:\n _init_data.log_fun('EXCEPTION:', traceback.format_exc())\n\n def delete(self):\n if _init_data.lib_wrapped_log_calls:\n _init_data.log_fun('delete: ', self.name, self)\n\n if self._get_handle():\n ObjectDelete(self._get_handle())\n\n def _get_handle(self):\n return self._handle_ptr[0]\n\n def set_name(self, name):\n ObjectSetName(self._get_handle(), encode(name))\n self.name = name\n\n\nclass Context(Object):\n ''' Context wraps the rpr_context type with useful methods '''\n core_type_name = 'rpr_context'\n\n plugin_id = -1\n cache_path = None\n cpu_device = None\n gpu_devices = []\n\n @classmethod\n def register_plugin(cls, lib_path, cache_path):\n cls.plugin_id = RegisterPlugin(encode(str(lib_path)))\n if cls.plugin_id == -1:\n error_msg = CoreError.get_last_error_message()\n raise RuntimeError(\"Plugin is not registered\", lib_path, error_msg)\n\n cls.cache_path = cache_path\n\n @classmethod\n def load_devices(cls):\n # getting available devices\n def get_device(create_flag, info_flag):\n try:\n context = cls(create_flag)\n device_name = context.get_info_str(info_flag)\n if not device_name:\n return None\n\n return {'flag': create_flag, 'name': device_name.strip()}\n\n except CoreError as err:\n if err.status == ERROR_UNSUPPORTED or platform.system() == \"Darwin\":\n return None\n\n raise err\n\n cls.cpu_device = get_device(CREATION_FLAGS_ENABLE_CPU, CONTEXT_CPU_NAME)\n cls.gpu_devices = []\n for i in range(16):\n create_flag = getattr(pyrprwrap, 'CREATION_FLAGS_ENABLE_GPU%d' % i)\n create_flag = create_flag if platform.system() != 'Darwin' \\\n else (create_flag | CREATION_FLAGS_ENABLE_METAL)\n device = get_device(create_flag, getattr(pyrprwrap, 'CONTEXT_GPU%d_NAME' % i))\n if not device:\n break\n\n cls.gpu_devices.append(device)\n\n def __init__(self, flags: [set, int], props: list = None, use_cache=True):\n super().__init__()\n self.aovs = {}\n self.parameters = {}\n\n if isinstance(flags, set):\n flags_ = 0\n for flag in flags:\n flags_ |= flag\n flags = flags_\n\n props_ptr = ffi.NULL\n if props is not None:\n props_ptr = ffi.new(\"rpr_context_properties[]\",\n [ffi.cast(\"rpr_context_properties\", entry) for entry in props])\n\n if not self.cache_path.is_dir():\n self.cache_path.mkdir(parents=True)\n\n 
CreateContext(API_VERSION, [self.plugin_id], 1, flags,\n props_ptr, encode(str(self.cache_path)) if use_cache and self.cache_path else ffi.NULL,\n self)\n\n def set_parameter(self, key, param):\n if isinstance(param, int):\n ContextSetParameterByKey1u(self, key, param)\n elif isinstance(param, bool):\n ContextSetParameterByKey1u(self, key, int(param))\n elif isinstance(param, float):\n ContextSetParameterByKey1f(self, key, param)\n elif isinstance(param, str):\n ContextSetParameterByKeyString(self, key, encode(param))\n elif isinstance(param, tuple) and len(param) == 3:\n ContextSetParameterByKey3f(self, key, *param)\n elif isinstance(param, tuple) and len(param) == 4:\n ContextSetParameterByKey4f(self, key, *param)\n else:\n raise TypeError(\"Incorrect type for ContextSetParameter*\", self, key, param)\n\n if self:\n # self could be None\n self.parameters[key] = param\n\n def set_scene(self, scene):\n ContextSetScene(self, scene)\n\n def render(self):\n ContextRender(self)\n\n def abort_render(self):\n ContextAbortRender(self)\n \n def render_tile(self, xmin, xmax, ymin, ymax):\n ContextRenderTile(self, xmin, xmax, ymin, ymax)\n\n def attach_aov(self, aov, frame_buffer):\n if aov in self.aovs:\n self.detach_aov(aov)\n\n self.aovs[aov] = frame_buffer\n frame_buffer.aov = aov\n ContextSetAOV(self, aov, frame_buffer)\n\n def detach_aov(self, aov):\n self.aovs[aov].aov = None\n ContextSetAOV(self, aov, None)\n del self.aovs[aov]\n\n def set_aov_index_lookup(self, key, r, g, b, a):\n ContextSetAOVindexLookup(self, key, r, g, b, a)\n\n def get_info_size(self, context_info):\n size = ffi.new('size_t *', 0)\n ContextGetInfo(self, context_info, 0, ffi.NULL, size)\n return size[0]\n\n def get_info_int(self, context_info):\n ptr = ffi.new('int *', 0)\n ContextGetInfo(self, context_info, 4, ptr, ffi.NULL)\n return ptr[0]\n\n def get_info_str(self, context_info):\n size = self.get_info_size(context_info)\n ptr = ffi.new('char[]', size)\n ContextGetInfo(self, context_info, size, ptr, ffi.NULL)\n return decode(ffi.string(ptr))\n\n def get_creation_flags(self):\n creation_flags = ffi.new(\"rpr_creation_flags*\", 0)\n ContextGetInfo(self, CONTEXT_CREATION_FLAGS, sys.getsizeof(creation_flags), creation_flags, ffi.NULL)\n return creation_flags[0]\n\n def get_info(self, context_info, str_type):\n val = ffi.new('%s *' % str_type)\n ContextGetInfo(self, context_info, sys.getsizeof(val), val, ffi.NULL)\n return val[0]\n\n\nclass Scene(Object):\n core_type_name = 'rpr_scene'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n self.objects = set()\n self.camera = None\n self.subdivision_camera = None\n self.environment_light = None\n self.background_image = None\n self.environment_overrides = {}\n ContextCreateScene(self.context, self)\n\n def delete(self):\n self.clear()\n super().delete()\n\n def attach(self, obj):\n if isinstance(obj, Shape):\n SceneAttachShape(self, obj)\n elif isinstance(obj, AreaLight):\n SceneAttachShape(self, obj.mesh)\n elif isinstance(obj, Light):\n SceneAttachLight(self, obj)\n elif isinstance(obj, HeteroVolume):\n SceneAttachHeteroVolume(self, obj)\n elif isinstance(obj, Curve):\n SceneAttachCurve(self, obj)\n else:\n raise TypeError(\"Incorrect type for SceneAttach*\", self, obj)\n\n self.objects.add(obj)\n\n def detach(self, obj):\n if isinstance(obj, Shape):\n SceneDetachShape(self, obj)\n elif isinstance(obj, AreaLight):\n SceneDetachShape(self, obj.mesh)\n elif isinstance(obj, Light):\n SceneDetachLight(self, obj)\n elif isinstance(obj, 
HeteroVolume):\n SceneDetachHeteroVolume(self, obj)\n elif isinstance(obj, Curve):\n SceneDetachCurve(self, obj)\n else:\n raise TypeError(\"Incorrect type for SceneDetach*\", self, obj)\n \n self.objects.remove(obj)\n\n def clear(self):\n self.set_background_image(None)\n\n for override_type in tuple(self.environment_overrides.keys()):\n self.remove_environment_override(override_type)\n if self.environment_light:\n self.remove_environment_light()\n\n SceneClear(self)\n self.camera = None\n self.subdivision_camera = None\n self.objects = set()\n\n def set_camera(self, camera):\n self.camera = camera\n SceneSetCamera(self, self.camera)\n\n def set_subdivision_camera(self, camera):\n \"\"\" Keep subdivision camera reference if used \"\"\"\n self.subdivision_camera = camera\n\n def add_environment_light(self, light):\n self.environment_light = light\n self.attach(light)\n\n def remove_environment_light(self):\n self.detach(self.environment_light)\n self.environment_light = None\n\n def set_background_image(self, image):\n self.background_image = image\n SceneSetBackgroundImage(self, image)\n\n def set_background_color(self, r, g, b):\n self.set_background_image(\n ImageData(self.context, np.full((2, 2, 4), (r, g, b, 1.0), dtype=np.float32)))\n\n def add_environment_override(self, core_id, light):\n self.environment_overrides[core_id] = light\n EnvironmentLightSetEnvironmentLightOverride(self.environment_light, core_id, light)\n\n def remove_environment_override(self, core_id):\n EnvironmentLightSetEnvironmentLightOverride(self.environment_light, core_id, None)\n del self.environment_overrides[core_id]\n\n\nclass Shape(Object):\n core_type_name = 'rpr_shape'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n self.shadow_catcher = False\n self.reflection_catcher = False\n self.is_visible = True\n\n self.materials = []\n self.volume_material = None\n self.displacement_material = None\n self.hetero_volume = None\n\n self.subdivision = None # { 'factor': int, 'boundary': int, 'crease_weight': float }\n self.is_portal_light = False\n\n def delete(self):\n if self.materials:\n self.set_material(None)\n if self.volume_material:\n self.set_volume_material(None)\n if self.displacement_material:\n self.set_displacement_material(None)\n if self.hetero_volume:\n self.set_hetero_volume(None)\n\n super().delete()\n\n def set_material(self, material):\n if self.materials:\n ShapeSetMaterial(self, None)\n self.materials.clear()\n\n if material:\n ShapeSetMaterial(self, material)\n self.materials.append(material)\n\n def set_material_faces(self, material, face_indices: np.array):\n ShapeSetMaterialFaces(self, material, ffi.cast('rpr_int*', face_indices.ctypes.data), len(face_indices))\n self.materials.append(material)\n\n def set_volume_material(self, node):\n self.volume_material = node\n ShapeSetVolumeMaterial(self, self.volume_material)\n\n def set_displacement_material(self, node):\n self.displacement_material = node\n ShapeSetDisplacementMaterial(self, self.displacement_material)\n\n def set_displacement_scale(self, minscale, maxscale):\n ShapeSetDisplacementScale(self, minscale, maxscale)\n\n def set_hetero_volume(self, hetero_volume):\n self.hetero_volume = hetero_volume\n ShapeSetHeteroVolume(self, self.hetero_volume)\n\n def set_transform(self, transform:np.array, transpose=True): # Blender needs matrix to be transposed\n ShapeSetTransform(self, transpose, ffi.cast('float*', transform.ctypes.data))\n\n def set_linear_motion(self, x, y, z):\n ShapeSetLinearMotion(self, x, 
y, z)\n\n def set_angular_motion(self, x, y, z, w):\n ShapeSetAngularMotion(self, x, y, z, w)\n\n def set_scale_motion(self, x, y, z):\n ShapeSetScaleMotion(self, x, y, z)\n\n def set_shadow_catcher(self, shadow_catcher):\n ShapeSetShadowCatcher(self, shadow_catcher)\n self.shadow_catcher = shadow_catcher\n\n def set_shadow_color(self, r, g, b):\n ShapeSetShadowColor(self, r, g, b)\n\n def set_reflection_catcher(self, reflection_catcher):\n ShapeSetReflectionCatcher(self, reflection_catcher)\n self.reflection_catcher = reflection_catcher\n\n def set_shadow(self, casts_shadow):\n # 1.330 removes SetShadow(), use visibility Flag.\n self.set_visibility_ex(\"visible.shadow\", casts_shadow)\n\n def set_visibility(self, visible):\n self.is_visible = visible\n ShapeSetVisibility(self, visible)\n\n def set_visibility_ex(self, visibility_type, visible):\n flags = {\n \"visible.light\": SHAPE_VISIBILITY_LIGHT,\n \"visible.refraction.glossy\": SHAPE_VISIBILITY_GLOSSY_REFRACTION,\n \"visible.reflection.glossy\": SHAPE_VISIBILITY_GLOSSY_REFLECTION,\n \"visible.diffuse\": SHAPE_VISIBILITY_DIFFUSE,\n \"visible.transparent\": SHAPE_VISIBILITY_TRANSPARENT,\n \"visible.refraction\": SHAPE_VISIBILITY_REFRACTION,\n \"visible.reflection\": SHAPE_VISIBILITY_REFLECTION,\n \"visible.shadow\": SHAPE_VISIBILITY_SHADOW,\n \"visible.primary\": SHAPE_VISIBILITY_PRIMARY_ONLY_FLAG,\n }\n ShapeSetVisibilityFlag(self, flags[visibility_type], visible)\n\n def set_visibility_in_specular(self, visible):\n ShapeSetVisibilityInSpecular(self, visible)\n\n def set_visibility_primary_only(self, visible):\n ShapeSetVisibilityFlag(self, SHAPE_VISIBILITY_PRIMARY_ONLY_FLAG, visible)\n\n def set_subdivision_factor(self, factor):\n ShapeSetSubdivisionFactor(self, factor)\n\n def set_auto_adapt_subdivision_factor(self, framebuffer, camera, factor):\n ShapeAutoAdaptSubdivisionFactor(self, framebuffer, camera, factor)\n\n def set_subdivision_boundary_interop(self, boundary):\n ShapeSetSubdivisionBoundaryInterop(self, boundary)\n\n def set_subdivision_crease_weight(self, factor):\n ShapeSetSubdivisionCreaseWeight(self, factor)\n\n def set_subdivision_auto_ratio_cap(self, auto_ratio_cap):\n ShapeSetSubdivisionAutoRatioCap(self, auto_ratio_cap)\n\n def set_light_group_id(self, group_id):\n ShapeSetLightGroupID(self, group_id)\n\n def set_portal_light(self, is_portal):\n self.is_portal_light = is_portal\n\n def mark_static(self, is_static):\n ShapeMarkStatic(self, is_static)\n\n def set_vertex_value(self, index: int, indices, values):\n ShapeSetVertexValue(self, index, ffi.cast(\"rpr_int *\", indices.ctypes.data),\n ffi.cast(\"float *\", values.ctypes.data), len(indices))\n\n def set_vertex_colors(self, colors):\n indices = np.arange(len(colors), dtype=np.int32)\n\n # index is 0-3 index (use for r,g,b,a)\n for i in range(4):\n values = np.ascontiguousarray(colors[:, i], dtype=np.float32)\n self.set_vertex_value(i, indices, values)\n\n def set_id(self, id):\n ShapeSetObjectID(self, id)\n\n def set_contour_ignore(self, ignore_in_contour):\n ShapeSetContourIgnore(self, ignore_in_contour)\n\n\nclass Curve(Object):\n core_type_name = 'rpr_curve'\n\n def __init__(self, context, control_points, points_radii, uvs):\n def to_segments(n):\n \"\"\"Index iterator which splits curve with n points to segments by 4\"\"\"\n m = n - 1\n for s in range(0, m, 3):\n yield s\n yield s + 1\n yield min(s + 2, m)\n yield min(s + 3, m)\n\n def iter_segments_radii():\n \"\"\" Get root and tip radii for each curve segment \"\"\"\n for e in range(0, curve_length, 4):\n 
yield points_radii[segment_steps[e]]\n yield points_radii[segment_steps[e + 3]]\n\n super().__init__()\n self.context = context\n self.material = None\n\n num_curves = control_points.shape[0]\n segment_steps = np.fromiter(to_segments(control_points.shape[1]), dtype=np.int32)\n curve_length = len(segment_steps)\n\n # converting control_points to points splitted by segments\n points = np.fromiter(\n (elem for i in range(num_curves)\n for step in segment_steps\n for elem in control_points[i, step]),\n dtype=np.float32\n ).reshape(-1, 3)\n\n if uvs is None:\n uvs_ptr = ffi.NULL\n else:\n uvs_ptr = ffi.cast(\"float *\", uvs.ctypes.data)\n \n segments_per_curve = curve_length // 4\n # create list of indices 0-control_points length\n indices = np.arange(len(points), dtype=np.uint32)\n\n # list full radius values for each curve\n curve_radii = np.fromiter(iter_segments_radii(), dtype=np.float32)\n radii = np.full((num_curves, len(curve_radii)), curve_radii, dtype=np.float32)\n\n is_tapered = not np.all(radii == curve_radii[0])\n\n # create list of segments per curve num_segments = length / 4\n segments = np.full(num_curves, segments_per_curve, dtype=np.int32)\n \n ContextCreateCurve(self.context, self,\n len(points), ffi.cast(\"float *\", points.ctypes.data), points[0].nbytes,\n len(indices), num_curves,\n ffi.cast('rpr_uint*', indices.ctypes.data), ffi.cast(\"float *\", radii.ctypes.data),\n uvs_ptr,\n ffi.cast('rpr_int*', segments.ctypes.data),\n 1 if is_tapered else 0)\n \n def delete(self):\n self.set_material(None)\n super().delete()\n\n def set_material(self, material):\n CurveSetMaterial(self, material)\n self.material = material\n\n def set_transform(self, transform:np.array, transpose=True): # Blender needs matrix to be transposed\n CurveSetTransform(self, transpose, ffi.cast('float*', transform.ctypes.data))\n\n\nclass Mesh(Shape):\n def __init__(self, context, vertices, normals, uvs: List[np.array],\n vertex_indices, normal_indices, uv_indices: List[np.array],\n num_face_vertices, mesh_info):\n super().__init__(context)\n\n self.poly_count = 0 if vertices is None else len(num_face_vertices) \n\n mesh_info_ptr = ffi.NULL\n if mesh_info:\n mesh_info_ptr = ffi.new(f\"rpr_mesh_info[{2 * len(mesh_info) + 1}]\")\n i = 0\n for key, val in mesh_info.items():\n mesh_info_ptr[i] = key\n mesh_info_ptr[i + 1] = val\n i += 2\n mesh_info_ptr[i] = 0\n\n if vertices is None:\n ContextCreateMeshEx2(\n self.context,\n ffi.NULL, 0, 0,\n ffi.NULL, 0, 0,\n ffi.NULL, 0, 0,\n 0,\n ffi.NULL, ffi.NULL,\n ffi.NULL,\n ffi.NULL, 0,\n ffi.NULL, 0,\n ffi.NULL, ffi.NULL,\n ffi.NULL, 0,\n mesh_info_ptr,\n self\n )\n return\n\n if len(uvs) > 1 or mesh_info_ptr:\n # several UVs set present\n texcoords_layers_num = len(uvs)\n texcoords_uvs = ffi.new(\"float *[]\", texcoords_layers_num)\n texcoords_count = np.zeros(texcoords_layers_num, dtype=np.uint64)\n texcoords_nbytes = np.zeros(texcoords_layers_num, dtype=np.int32)\n texcoords_ind = ffi.new(\"rpr_int *[]\", texcoords_layers_num)\n texcoords_ind_nbytes = np.zeros(texcoords_layers_num, dtype=np.int32)\n\n for i, uvs_set in enumerate(uvs):\n texcoords_uvs[i] = ffi.cast('float *', uvs_set.ctypes.data)\n texcoords_count[i] = len(uvs_set)\n texcoords_nbytes[i] = uvs_set[0].nbytes\n texcoords_ind[i] = ffi.cast('rpr_int *', uv_indices[i].ctypes.data)\n texcoords_ind_nbytes[i] = uv_indices[i][0].nbytes\n\n ContextCreateMeshEx2(\n self.context,\n ffi.cast(\"float *\", vertices.ctypes.data), len(vertices), vertices[0].nbytes,\n ffi.cast(\"float *\", normals.ctypes.data), 
len(normals), normals[0].nbytes,\n ffi.NULL, 0, 0,\n texcoords_layers_num,\n texcoords_uvs, ffi.cast('size_t *', texcoords_count.ctypes.data),\n ffi.cast('rpr_int *', texcoords_nbytes.ctypes.data),\n ffi.cast('rpr_int*', vertex_indices.ctypes.data), vertex_indices[0].nbytes,\n ffi.cast('rpr_int*', normal_indices.ctypes.data), normal_indices[0].nbytes,\n texcoords_ind, ffi.cast('rpr_int*', texcoords_ind_nbytes.ctypes.data),\n ffi.cast('rpr_int*', num_face_vertices.ctypes.data), len(num_face_vertices),\n mesh_info_ptr,\n self\n )\n\n else:\n if uvs:\n # single UVs set\n uv = uvs[0]\n indices = uv_indices[0]\n texcoords_ptr = ffi.cast(\"float *\", uv.ctypes.data)\n texcoords_count = len(uv)\n texcoords_nbytes = uv[0].nbytes\n texcoords_ind_ptr = ffi.cast('rpr_int*', indices.ctypes.data)\n texcoords_ind_nbytes = indices[0].nbytes\n else:\n # No UVs data found\n texcoords_ptr = ffi.NULL\n texcoords_count = 0\n texcoords_nbytes = 0\n texcoords_ind_ptr = ffi.NULL\n texcoords_ind_nbytes = 0\n\n ContextCreateMesh(\n self.context,\n ffi.cast(\"float *\", vertices.ctypes.data), len(vertices), vertices[0].nbytes,\n ffi.cast(\"float *\", normals.ctypes.data), len(normals), normals[0].nbytes,\n texcoords_ptr, texcoords_count, texcoords_nbytes,\n ffi.cast('rpr_int*', vertex_indices.ctypes.data), vertex_indices[0].nbytes,\n ffi.cast('rpr_int*', normal_indices.ctypes.data), normal_indices[0].nbytes,\n texcoords_ind_ptr, texcoords_ind_nbytes,\n ffi.cast('rpr_int*', num_face_vertices.ctypes.data), len(num_face_vertices),\n self\n )\n\n\nclass Instance(Shape):\n def __init__(self, context, mesh):\n super().__init__(context)\n self.mesh = mesh\n ContextCreateInstance(self.context, mesh, self)\n\n\nclass Grid(Object):\n \"\"\" HeteroVolume grid data \"\"\"\n core_type_name = 'rpr_grid'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n\n @staticmethod\n def init_from_3d_array(context, grid_data: np.ndarray):\n grid = Grid(context)\n\n x, y, z = grid_data.shape\n grid_data = grid_data.reshape(-1)\n\n indices = np.nonzero(grid_data)[0]\n data = np.ascontiguousarray(grid_data[indices])\n\n ContextCreateGrid(\n grid.context, grid,\n x, y, z,\n ffi.cast('const size_t *', indices.ctypes.data), len(indices),\n GRID_INDICES_TOPOLOGY_I_U64,\n ffi.cast('const float *', data.ctypes.data), data.nbytes,\n 0\n )\n\n return grid\n\n @staticmethod\n def init_from_array_indices(context, x, y, z, grid_data, indices):\n grid = Grid(context)\n\n ContextCreateGrid(\n grid.context, grid,\n x, y, z,\n ffi.cast('const unsigned int *', indices.ctypes.data), len(indices),\n GRID_INDICES_TOPOLOGY_XYZ_U32,\n ffi.cast('const float *', grid_data.ctypes.data), grid_data.nbytes,\n 0\n )\n\n return grid\n\n\nclass HeteroVolume(Object):\n \"\"\" Heterogeneous volume voxels grid object to scatter and emit light \"\"\"\n core_type_name = 'rpr_hetero_volume'\n\n set_grid_func = {'density': HeteroVolumeSetDensityGrid,\n 'albedo': HeteroVolumeSetAlbedoGrid,\n 'emission': HeteroVolumeSetEmissionGrid}\n set_lookup_func = {'density': HeteroVolumeSetDensityLookup,\n 'albedo': HeteroVolumeSetAlbedoLookup,\n 'emission': HeteroVolumeSetEmissionLookup}\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n ContextCreateHeteroVolume(self.context, self)\n\n # keep volume grids while volume exists\n self.grids = {}\n\n def set_transform(self, transform: np.array, transpose=True): # Blender needs matrix to be transposed\n HeteroVolumeSetTransform(self, transpose, ffi.cast('float*', 
transform.ctypes.data))\n\n def set_grid(self, grid_type, grid):\n self.set_grid_func[grid_type](self, grid)\n self.grids[grid_type] = grid\n\n def set_lookup(self, grid_type, lookup: np.array):\n self.set_lookup_func[grid_type](self, ffi.cast('const float *', lookup.ctypes.data),\n len(lookup))\n\n\nclass Camera(Object):\n core_type_name = 'rpr_camera'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n ContextCreateCamera(self.context, self)\n\n def set_mode(self, mode):\n CameraSetMode(self, mode)\n\n def look_at(self, pos, at, up):\n CameraLookAt(self, pos[0], pos[1], pos[2], \n at[0], at[1], at[2], \n up[0], up[1], up[2])\n\n def set_lens_shift(self, shiftx, shifty):\n CameraSetLensShift(self, shiftx, shifty)\n\n def set_focal_length(self, flength):\n CameraSetFocalLength(self, flength)\n\n def set_sensor_size(self, width, height):\n CameraSetSensorSize(self, width, height)\n\n def set_f_stop(self, fstop):\n if fstop is None:\n # if disabled fstop will be max float\n CameraSetFStop(self, np.finfo(np.float32).max)\n else:\n CameraSetFStop(self, fstop)\n\n def set_aperture_blades(self, num_blades):\n CameraSetApertureBlades(self, num_blades)\n\n def set_focus_distance(self, fdist):\n CameraSetFocusDistance(self, fdist)\n\n def set_ortho(self, width, height):\n CameraSetOrthoWidth(self, width)\n CameraSetOrthoHeight(self, height)\n\n def set_angular_motion(self, x, y, z, w):\n CameraSetAngularMotion(self, x, y, z, w)\n\n def set_linear_motion(self, x, y, z):\n CameraSetLinearMotion(self, x, y, z)\n\n def set_exposure(self, exposure):\n CameraSetExposure(self, exposure)\n\n def set_clip_plane(self, near, far):\n CameraSetNearPlane(self, near)\n CameraSetFarPlane(self, far)\n\n def set_transform(self, transform:np.array, transpose=True): # Blender needs matrix to be transposed\n CameraSetTransform(self, transpose, ffi.cast('float*', transform.ctypes.data))\n\n\nclass FrameBuffer(Object):\n core_type_name = 'rpr_framebuffer'\n channels = 4 # core requires always 4 channels\n\n def __init__(self, context, width, height):\n super().__init__()\n self.context = context\n self.width = width\n self.height = height\n self.aov = None\n self._create()\n\n def delete(self):\n if self.aov is not None:\n self.context.detach_aov(self.aov)\n \n return super().delete()\n\n def _create(self):\n desc = ffi.new(\"rpr_framebuffer_desc*\")\n desc.fb_width, desc.fb_height = self.width, self.height\n ContextCreateFrameBuffer(self.context, (self.channels, COMPONENT_TYPE_FLOAT32), desc, self)\n\n def resize(self, width, height):\n if self.width == width and self.height == height:\n return\n\n aov = self.aov\n self.delete()\n\n self.width = width\n self.height = height\n self._create()\n\n if aov is not None:\n self.context.attach_aov(aov, self)\n\n def clear(self):\n FrameBufferClear(self)\n\n def resolve(self, resolved_fb, normalize_only=True):\n ContextResolveFrameBuffer(self.context, self, resolved_fb, normalize_only)\n \n def get_data(self, buf=None):\n if buf:\n FrameBufferGetInfo(self, FRAMEBUFFER_DATA, self.size(), ffi.cast('float*', buf), ffi.NULL)\n return buf\n\n data = np.empty((self.height, self.width, self.channels), dtype=np.float32)\n FrameBufferGetInfo(self, FRAMEBUFFER_DATA, self.size(), ffi.cast('float*', data.ctypes.data), ffi.NULL)\n return data\n\n def size(self):\n return self.width * self.height * self.channels * 4 # 4 bytes = sizeof(float32)\n\n def save_to_file(self, file_path):\n FrameBufferSaveToFile(self, encode(file_path))\n\n def get_cl_mem(self):\n cl_mem 
= ffi.new('rpr_cl_mem *')\n FrameBufferGetInfo(self, CL_MEM_OBJECT, sys.getsizeof(cl_mem), cl_mem, ffi.NULL)\n return cl_mem[0]\n\n\nclass FrameBufferGL(FrameBuffer):\n def __init__(self, context, width, height):\n super().__init__(context, width, height)\n\n def _create(self):\n textures = bgl.Buffer(bgl.GL_INT, [1,])\n bgl.glGenTextures(1, textures)\n self.texture_id = textures[0]\n\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture_id)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_REPEAT)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_REPEAT)\n\n bgl.glTexImage2D(\n bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA if platform.system() == 'Darwin' else bgl.GL_RGBA16F,\n self.width, self.height, 0,\n bgl.GL_RGBA, bgl.GL_FLOAT,\n bgl.Buffer(bgl.GL_FLOAT, [self.width, self.height, self.channels])\n )\n\n ContextCreateFramebufferFromGLTexture2D(self.context, bgl.GL_TEXTURE_2D, 0, self.texture_id, self)\n\n def delete(self):\n super().delete()\n textures = bgl.Buffer(bgl.GL_INT, [1,], [self.texture_id, ])\n bgl.glDeleteTextures(1, textures)\n\n\nclass Composite(Object):\n core_type_name = 'rpr_composite'\n\n def __init__(self, context, in_type):\n super().__init__()\n self.context = context\n self.inputs = {}\n ContextCreateComposite(self.context, in_type, self)\n\n def set_input(self, name, in_value):\n if name == 'arithmetic.op':\n CompositeSetInputOp(self, encode(name), in_value)\n elif isinstance(in_value, int):\n CompositeSetInput1u(self, encode(name), in_value)\n elif isinstance(in_value, float):\n CompositeSetInput4f(self, encode(name), in_value, in_value, in_value, in_value)\n elif isinstance(in_value, tuple) and len(in_value) == 4:\n CompositeSetInput4f(self, encode(name), *in_value)\n elif isinstance(in_value, Composite):\n CompositeSetInputC(self, encode(name), in_value)\n elif isinstance(in_value, FrameBuffer):\n CompositeSetInputFb(self, encode(name), in_value)\n else:\n raise TypeError(\"Incorrect type for CompositeSetInput*\", self, name, in_value)\n\n self.inputs[name] = in_value\n\n def compute(self, fb):\n CompositeCompute(self, fb)\n\n ### MATH OPERATIONS ###\n def _arithmetic_helper(self, rpr_operation, other1=None, other2=None, reverse=False):\n result = Composite(self.context, COMPOSITE_ARITHMETIC)\n result.set_input('arithmetic.op', rpr_operation)\n\n if other1 is None:\n result.set_input('arithmetic.color0', self)\n\n else:\n if isinstance(other1, (float, tuple)):\n data = other1\n other1 = Composite(self.context, COMPOSITE_CONSTANT)\n other1.set_input('constant.input', data)\n\n if other2 is None:\n if reverse:\n result.set_input('arithmetic.color0', other1)\n result.set_input('arithmetic.color1', self)\n else:\n result.set_input('arithmetic.color0', self)\n result.set_input('arithmetic.color1', other1)\n\n else:\n if isinstance(other2, (float, tuple)):\n data = other2\n other2 = Composite(self.context, COMPOSITE_CONSTANT)\n other2.set_input('constant.input', data)\n\n result.set_input('arithmetic.color0', self)\n result.set_input('arithmetic.color1', other1)\n result.set_input('arithmetic.color1', other2)\n\n return result\n\n def __add__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_ADD, other)\n\n def __sub__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_SUB, other)\n\n def __mul__(self, other):\n return 
self._arithmetic_helper(MATERIAL_NODE_OP_MUL, other)\n\n def __truediv__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_DIV, other)\n\n def __radd__(self, other):\n return self + other\n\n def __rsub__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_SUB, other, None, True)\n\n def __rmul__(self, other):\n return self * other\n\n def __rtruediv__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_DIV, other, None, True)\n\n def min(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_MIN, other)\n\n def max(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_MAX, other)\n\n def get_channel(self, key):\n rpr_operation = {\n 0: MATERIAL_NODE_OP_SELECT_X,\n 1: MATERIAL_NODE_OP_SELECT_Y,\n 2: MATERIAL_NODE_OP_SELECT_Z,\n 3: MATERIAL_NODE_OP_SELECT_W,\n }[key]\n\n return self._arithmetic_helper(rpr_operation)\n\n def __gt__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_GREATER, other)\n\n def __ge__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_GREATER_OR_EQUAL, other)\n\n def __lt__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_LOWER, other)\n\n def __le__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_LOWER_OR_EQUAL, other)\n\n def __eq__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_EQUAL, other)\n\n def __ne__(self, other):\n return self._arithmetic_helper(MATERIAL_NODE_OP_NOT_EQUAL, other)\n\n def if_else(self, if_value, else_value):\n return self._arithmetic_helper(MATERIAL_NODE_OP_TERNARY, if_value, else_value)\n\n\nclass MaterialSystem(Object):\n core_type_name = 'rpr_material_system'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n ContextCreateMaterialSystem(self.context, 0, self)\n\n\nclass MaterialNode(Object):\n core_type_name = 'rpr_material_node'\n\n def __init__(self, material_system, material_type):\n super().__init__()\n self.material_system = material_system\n self.inputs = {}\n self.type = material_type\n MaterialSystemCreateNode(self.material_system, self.type, self)\n\n def delete(self):\n for name, value in self.inputs.items():\n if isinstance(value, MaterialNode):\n MaterialNodeSetInputNByKey(self, name, None)\n elif isinstance(value, Image):\n MaterialNodeSetInputImageDataByKey(self, name, None)\n elif isinstance(value, Buffer):\n MaterialNodeSetInputBufferDataByKey(self, name, None)\n elif isinstance(value, Grid):\n MaterialNodeSetInputGridDataByKey(self, name, None)\n self.inputs.clear()\n\n super().delete()\n\n def set_input(self, name, value):\n if isinstance(value, MaterialNode):\n MaterialNodeSetInputNByKey(self, name, value)\n elif isinstance(value, int):\n MaterialNodeSetInputUByKey(self, name, value)\n elif isinstance(value, bool):\n MaterialNodeSetInputUByKey(self, name, TRUE if value else FALSE)\n elif isinstance(value, float):\n MaterialNodeSetInputFByKey(self, name, value, value, value, value)\n elif isinstance(value, tuple) and len(value) == 3:\n MaterialNodeSetInputFByKey(self, name, *value, 1.0)\n elif isinstance(value, tuple) and len(value) == 4:\n MaterialNodeSetInputFByKey(self, name, *value)\n elif isinstance(value, Image):\n MaterialNodeSetInputImageDataByKey(self, name, value)\n elif isinstance(value, Buffer):\n MaterialNodeSetInputBufferDataByKey(self, name, value)\n elif isinstance(value, Grid):\n MaterialNodeSetInputGridDataByKey(self, name, value)\n else:\n raise TypeError(\"Incorrect type for MaterialNodeSetInput*\", self, name, value)\n\n self.inputs[name] 
= value\n\n def set_id(self, id):\n MaterialNodeSetID(self, id)\n\n\nclass Light(Object):\n core_type_name = 'rpr_light'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n\n def set_transform(self, transform:np.array, transpose=True): # Blender needs matrix to be transposed\n LightSetTransform(self, transpose, ffi.cast('float*', transform.ctypes.data))\n\n def set_group_id(self, group_id):\n LightSetGroupId(self, group_id)\n\n\nclass EnvironmentLight(Light):\n def __init__(self, context):\n super().__init__(context)\n self.portals = set()\n self.image = None\n ContextCreateEnvironmentLight(self.context, self)\n\n def delete(self):\n super().delete()\n\n def set_image(self, image):\n self.image = image\n if not self.image:\n self.set_color(1.0, 0.0, 1.0)\n return\n\n EnvironmentLightSetImage(self, image)\n\n def set_color(self, r, g, b):\n self.set_image(ImageData(self.context, np.full((2, 2, 4), (r, g, b, 1.0), dtype=np.float32)))\n\n def set_intensity_scale(self, intensity_scale):\n EnvironmentLightSetIntensityScale(self, intensity_scale)\n\n def attach_portal(self, scene, portal):\n self.portals.add(portal)\n EnvironmentLightAttachPortal(scene, self, portal)\n\n def detach_portal(self, scene, portal):\n EnvironmentLightDetachPortal(scene, self, portal)\n self.portals.remove(portal)\n\n\nclass IESLight(Light):\n def __init__(self, context):\n super().__init__(context)\n ContextCreateIESLight(self.context, self)\n\n def set_radiant_power(self, r, g, b):\n IESLightSetRadiantPower3f(self, r, g, b)\n\n def set_image_from_file(self, image_path, nx, ny):\n IESLightSetImageFromFile(self, encode(image_path), nx, ny)\n\n def set_transform(self, transform: np.array, transpose=True):\n # transform matrix has to be rotated by 90 degrees around X axis\n rot = np.array(((1, 0, 0, 0),\n (0, 0, -1, 0),\n (0, 1, 0, 0),\n (0, 0, 0, 1)), dtype=np.float32)\n transform_rot = transform @ rot\n LightSetTransform(self, transpose, ffi.cast('float*', transform_rot.ctypes.data))\n\n\nclass PointLight(Light):\n def __init__(self, context):\n super().__init__(context)\n ContextCreatePointLight(self.context, self)\n\n def set_radiant_power(self, r, g, b):\n PointLightSetRadiantPower3f(self, r, g, b)\n\n def set_radius(self, radius):\n \"\"\" Supported in RPR 2.0 Sphere Light \"\"\"\n pass\n\n\nclass SpotLight(Light):\n def __init__(self, context):\n super().__init__(context)\n ContextCreateSpotLight(self.context, self)\n\n def set_radiant_power(self, r, g, b):\n SpotLightSetRadiantPower3f(self, r, g, b)\n\n def set_cone_shape(self, iangle, oangle):\n SpotLightSetConeShape(self, iangle, oangle)\n\n def set_radius(self, radius):\n \"\"\" Supported in RPR 2.0 Disk Light \"\"\"\n pass\n\n\nclass DirectionalLight(Light):\n def __init__(self, context):\n super().__init__(context)\n ContextCreateDirectionalLight(self.context, self)\n\n def set_radiant_power(self, r, g, b):\n DirectionalLightSetRadiantPower3f(self, r, g, b)\n\n def set_shadow_softness_angle(self, angle):\n DirectionalLightSetShadowSoftnessAngle(self, angle)\n\n\nclass AreaLight(Light):\n core_type_name = ''\n\n def __init__(self, mesh, material_system):\n self.mesh = mesh\n self.material_system = material_system\n\n self.color_node = MaterialNode(self.material_system, MATERIAL_NODE_ARITHMETIC)\n self.color_node.set_input(MATERIAL_INPUT_OP, MATERIAL_NODE_OP_MUL)\n self.color_node.set_input(MATERIAL_INPUT_COLOR0, 1.0) # for color\n self.color_node.set_input(MATERIAL_INPUT_COLOR1, 1.0) # for image\n\n emissive_node = 
MaterialNode(self.material_system, MATERIAL_NODE_EMISSIVE)\n emissive_node.set_input(MATERIAL_INPUT_COLOR, self.color_node)\n\n self.mesh.set_material(emissive_node)\n\n def delete(self):\n # delete() should be empty\n pass\n\n def set_name(self, name):\n self.name = name\n self.mesh.set_name(name)\n\n def set_radiant_power(self, r, g, b):\n self.color_node.set_input(MATERIAL_INPUT_COLOR0, (r, g, b))\n\n def set_image(self, image):\n if image:\n image_node = MaterialNode(self.material_system, MATERIAL_NODE_IMAGE_TEXTURE)\n image_node.set_input(MATERIAL_INPUT_DATA, image)\n self.color_node.set_input(MATERIAL_INPUT_COLOR1, image_node)\n else:\n self.color_node.set_input(MATERIAL_INPUT_COLOR1, 1.0)\n\n def set_shadow(self, casts_shadow):\n self.mesh.set_shadow(casts_shadow)\n\n def set_visibility(self, visible):\n self.mesh.set_visibility_ex('visible.light', visible)\n self.mesh.set_visibility_ex('visible.primary', visible)\n\n def set_transform(self, transform:np.array, transpose=True): # Blender needs matrix to be transposed\n self.mesh.set_transform(transform, transpose)\n\n def set_group_id(self, group_id):\n self.mesh.set_light_group_id(group_id)\n\n def set_linear_motion(self, x, y, z):\n self.mesh.set_linear_motion(x, y, z)\n\n def set_angular_motion(self, x, y, z, w):\n self.mesh.set_angular_motion(x, y, z, w)\n\n def set_scale_motion(self, x, y, z):\n self.mesh.set_scale_motion(x, y, z)\n\n\nclass Image(Object):\n core_type_name = 'rpr_image'\n\n def __init__(self, context):\n super().__init__()\n self.context = context\n self._size_byte = None\n\n def set_gamma(self, gamma):\n ImageSetGamma(self, gamma)\n\n def set_wrap(self, wrap_type):\n ImageSetWrap(self, wrap_type)\n\n def set_colorspace(self, colorspace):\n ImageSetOcioColorspace(self, encode(colorspace))\n\n def set_compression(self, compression):\n ImageSetInternalCompression(self, compression)\n\n @property\n def size_byte(self):\n if self._size_byte is None:\n ptr = ffi.new('long long *', 0)\n ImageGetInfo(self, IMAGE_DATA_SIZEBYTE, 8, ptr, ffi.NULL)\n self._size_byte = ptr[0]\n\n return self._size_byte\n\n\nclass ImageData(Image):\n def __init__(self, context, data: np.array):\n super().__init__(context)\n\n components = data.shape[2]\n desc = ffi.new(\"rpr_image_desc*\")\n desc.image_width = data.shape[1]\n desc.image_height = data.shape[0]\n desc.image_depth = 0\n desc.image_row_pitch = desc.image_width * ffi.sizeof('rpr_float') * components\n desc.image_slice_pitch = 0\n\n ContextCreateImage(self.context, (components, COMPONENT_TYPE_FLOAT32), desc, ffi.cast(\"float *\", data.ctypes.data), self)\n\n\nclass ImageFile(Image):\n def __init__(self, context, path):\n super().__init__(context)\n\n self.path = path\n ContextCreateImageFromFile(self.context, encode(self.path), self)\n\n\nclass Buffer(Object):\n core_type_name = 'rpr_buffer'\n\n def __init__(self, context, data:np.array, element_type):\n super().__init__()\n self.context = context\n\n desc = ffi.new(\"rpr_buffer_desc*\")\n desc.nb_element = len(data)\n desc.element_type = element_type\n desc.element_channel_size = len(data[0])\n\n ContextCreateBuffer(self.context, desc, ffi.cast(\"float *\", data.ctypes.data), self)\n\n\nclass PostEffect(Object):\n core_type_name = 'rpr_post_effect'\n\n def __init__(self, context, post_effect_type):\n super().__init__()\n self.context = context\n ContextCreatePostEffect(self.context, post_effect_type, self)\n ContextAttachPostEffect(self.context, self)\n\n def delete(self):\n ContextDetachPostEffect(self.context, self)\n 
super().delete()\n\n def set_parameter(self, name, param):\n if isinstance(param, int):\n PostEffectSetParameter1u(self, encode(name), param)\n elif isinstance(param, float):\n PostEffectSetParameter1f(self, encode(name), param)\n else:\n raise TypeError(\"Incorrect type for PostEffectSetParameter*\", self, name, param)\n" ]
[ [ "numpy.nonzero", "numpy.ascontiguousarray", "numpy.full", "numpy.all", "numpy.finfo", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
moneypi/License-Plate-Detector
[ "59615d375ddd8e67b0c246d2b8997b0489760eef" ]
[ "models/yolo.py" ]
[ "import argparse\nimport logging\nimport math\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.nn as nn\n\nsys.path.append('./') # to run '$ python *.py' files in subdirectories\nlogger = logging.getLogger(__name__)\n\nfrom models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, ShuffleV2Block, Concat, NMS, autoShape, StemBlock\nfrom models.experimental import MixConv2d, CrossConv\nfrom utils.autoanchor import check_anchor_order\nfrom utils.general import make_divisible, check_file, set_logging\nfrom utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \\\n select_device, copy_attr\n\ntry:\n import thop # for FLOPS computation\nexcept ImportError:\n thop = None\n\n\nclass Detect(nn.Module):\n stride = None # strides computed during build\n export = False # onnx export\n\n def __init__(self, nc=80, anchors=(), ch=()): # detection layer\n super(Detect, self).__init__()\n self.nc = nc # number of classes\n #self.no = nc + 5 # number of outputs per anchor\n self.no = nc + 5 + 8 # number of outputs per anchor\n\n self.nl = len(anchors) # number of detection layers\n self.na = len(anchors[0]) // 2 # number of anchors\n self.grid = [torch.zeros(1)] * self.nl # init grid\n a = torch.tensor(anchors).float().view(self.nl, -1, 2)\n self.register_buffer('anchors', a) # shape(nl,na,2)\n self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)\n self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv\n\n def forward(self, x):\n # x = x.copy() # for profiling\n z = [] # inference output\n # self.training |= self.export\n if self.export:\n for i in range(self.nl):\n x[i] = self.m[i](x[i])\n bs, _, ny, nx = x[i].shape # x(bs,48,20,20) to x(bs,3,20,20,16)\n x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()\n\n return x\n for i in range(self.nl):\n x[i] = self.m[i](x[i]) # conv\n bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)\n x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()\n\n if not self.training: # inference\n if self.grid[i].shape[2:4] != x[i].shape[2:4]:\n self.grid[i] = self._make_grid(nx, ny).to(x[i].device)\n\n y = torch.full_like(x[i], 0)\n class_range = list(range(5)) + list(range(13,13+self.nc))\n y[..., class_range] = x[i][..., class_range].sigmoid()\n y[..., 5:13] = x[i][..., 5:13]\n #y = x[i].sigmoid()\n\n y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy\n y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh\n\n #y[..., 5:15] = y[..., 5:15] * 8 - 4\n y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x1 y1\n y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x2 y2\n y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x3 y3\n y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x4 y4\n \n\n #y[..., 5:7] = (y[..., 5:7] * 2 -1) * self.anchor_grid[i] # landmark x1 y1\n #y[..., 7:9] = (y[..., 7:9] * 2 -1) * self.anchor_grid[i] # landmark x2 y2\n #y[..., 9:11] = (y[..., 9:11] * 2 -1) * self.anchor_grid[i] # landmark x3 y3\n #y[..., 11:13] = (y[..., 11:13] * 2 -1) * self.anchor_grid[i] # landmark x4 y4\n #y[..., 13:15] = (y[..., 13:15] * 2 -1) * self.anchor_grid[i] # landmark x5 y5\n\n z.append(y.view(bs, -1, self.no))\n\n return x if self.training else (torch.cat(z, 1), x)\n\n @staticmethod\n def _make_grid(nx=20, ny=20):\n yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])\n return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()\n\n\nclass Model(nn.Module):\n def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes\n super(Model, self).__init__()\n if isinstance(cfg, dict):\n self.yaml = cfg # model dict\n else: # is *.yaml\n import yaml # for torch hub\n self.yaml_file = Path(cfg).name\n with open(cfg) as f:\n self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict\n\n # Define model\n ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels\n if nc and nc != self.yaml['nc']:\n logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))\n self.yaml['nc'] = nc # override yaml value\n self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist\n self.names = [str(i) for i in range(self.yaml['nc'])] # default names\n # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])\n\n # Build strides, anchors\n m = self.model[-1] # Detect()\n if isinstance(m, Detect):\n s = 128 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n m.anchors /= m.stride.view(-1, 1, 1)\n check_anchor_order(m)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n\n # Init weights, biases\n initialize_weights(self)\n self.info()\n logger.info('')\n\n def forward(self, x, augment=False, profile=False):\n if augment:\n img_size = x.shape[-2:] # height, width\n s = [1, 0.83, 0.67] # scales\n f = [None, 3, None] # flips (2-ud, 3-lr)\n y = [] # outputs\n for si, fi in zip(s, f):\n xi = scale_img(x.flip(fi) if fi else x, si)\n yi = self.forward_once(xi)[0] # forward\n # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save\n yi[..., :4] /= si # de-scale\n if fi == 2:\n yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud\n elif fi == 3:\n yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr\n y.append(yi)\n return torch.cat(y, 1), None # augmented inference, train\n else:\n return self.forward_once(x, profile) # single-scale inference, train\n\n def forward_once(self, x, profile=False):\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if 
isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if profile:\n o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS\n t = time_synchronized()\n for _ in range(10):\n _ = m(x)\n dt.append((time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\n\n x = m(x) # run\n y.append(x if m.i in self.save else None) # save output\n\n if profile:\n print('%.1fms total' % sum(dt))\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for mi in m.m: # from\n b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))\n\n # def _print_weights(self):\n # for m in self.model.modules():\n # if type(m) is Bottleneck:\n # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers... ')\n for m in self.model.modules():\n if type(m) is Conv and hasattr(m, 'bn'):\n m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv\n delattr(m, 'bn') # remove batchnorm\n m.forward = m.fuseforward # update forward\n self.info()\n return self\n\n def nms(self, mode=True): # add or remove NMS module\n present = type(self.model[-1]) is NMS # last layer is NMS\n if mode and not present:\n print('Adding NMS... ')\n m = NMS() # module\n m.f = -1 # from\n m.i = self.model[-1].i + 1 # index\n self.model.add_module(name='%s' % m.i, module=m) # add\n self.eval()\n elif not mode and present:\n print('Removing NMS... ')\n self.model = self.model[:-1] # remove\n return self\n\n def autoshape(self): # add autoShape module\n print('Adding autoShape... 
')\n m = autoShape(self) # wrap model\n copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes\n return m\n\n def info(self, verbose=False, img_size=640): # print model information\n model_info(self, verbose, img_size)\n\n\ndef parse_model(d, ch): # model_dict, input_channels(3)\n logger.info('\\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))\n anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']\n na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors\n no = na * (nc + 5) # number of outputs = anchors * (classes + 5)\n\n layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out\n for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args\n m = eval(m) if isinstance(m, str) else m # eval strings\n for j, a in enumerate(args):\n try:\n args[j] = eval(a) if isinstance(a, str) else a # eval strings\n except:\n pass\n\n n = max(round(n * gd), 1) if n > 1 else n # depth gain\n if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, ShuffleV2Block, StemBlock]:\n c1, c2 = ch[f], args[0]\n\n # Normal\n # if i > 0 and args[0] != no: # channel expansion factor\n # ex = 1.75 # exponential (default 2.0)\n # e = math.log(c2 / ch[1]) / math.log(2)\n # c2 = int(ch[1] * ex ** e)\n # if m != Focus:\n\n c2 = make_divisible(c2 * gw, 8) if c2 != no else c2\n\n # Experimental\n # if i > 0 and args[0] != no: # channel expansion factor\n # ex = 1 + gw # exponential (default 2.0)\n # ch1 = 32 # ch[1]\n # e = math.log(c2 / ch1) / math.log(2) # level 1-n\n # c2 = int(ch1 * ex ** e)\n # if m != Focus:\n # c2 = make_divisible(c2, 8) if c2 != no else c2\n\n args = [c1, c2, *args[1:]]\n if m in [BottleneckCSP, C3]:\n args.insert(2, n)\n n = 1\n elif m is nn.BatchNorm2d:\n args = [ch[f]]\n elif m is Concat:\n c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])\n elif m is Detect:\n args.append([ch[x + 1] for x in f])\n if isinstance(args[1], int): # number of anchors\n args[1] = [list(range(args[1] * 2))] * len(f)\n else:\n c2 = ch[f]\n\n m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module\n t = str(m)[8:-2].replace('__main__.', '') # module type\n np = sum([x.numel() for x in m_.parameters()]) # number params\n m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params\n logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print\n save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist\n layers.append(m_)\n ch.append(c2)\n return nn.Sequential(*layers), sorted(save)\n\n\nfrom thop import profile\nfrom thop import clever_format\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')\n parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n opt = parser.parse_args()\n opt.cfg = check_file(opt.cfg) # check file\n set_logging()\n device = select_device(opt.device)\n \n # Create model\n model = Model(opt.cfg).to(device)\n stride = model.stride.max()\n if stride == 32:\n input = torch.Tensor(1, 3, 480, 640).to(device)\n else:\n input = torch.Tensor(1, 3, 512, 640).to(device)\n model.train()\n print(model)\n flops, params = profile(model, inputs=(input, ))\n flops, params = clever_format([flops, params], \"%.3f\")\n print('Flops:', flops, ',Params:' ,params)\n" ]
[ [ "torch.nn.Sequential", "torch.Tensor", "torch.cat", "torch.zeros", "torch.nn.Conv2d", "torch.tensor", "torch.arange", "torch.full_like", "torch.stack" ] ]
google-research/deadunits
[ "5f4c7d9dc0201cefeb3dc970bcaee66a78cfa423" ]
[ "deadunits/utils_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Deadunits Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Tests for `deadunits.utils`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nfrom deadunits import utils\nimport mock\nimport tensorflow.compat.v2 as tf\n\n\nclass Score2BinaryMaskTest(parameterized.TestCase, tf.test.TestCase):\n\n def testCorrectness(self):\n score = tf.range(5)\n m = utils.create_binary_mask_from_scores(score, f=0.5)\n self.assertAllEqual(m, tf.constant([0, 0, 1, 1, 1]))\n\n m = utils.create_binary_mask_from_scores(score, n_zeros=3)\n self.assertAllEqual(m, tf.constant([0, 0, 0, 1, 1]))\n\n score = tf.range(4, 0, -1)\n m = utils.create_binary_mask_from_scores(score, f=0.5)\n self.assertAllEqual(m, tf.constant([1, 1, 0, 0]))\n\n score = tf.constant([0.1, 0.2, -0.5, 0.23, -5.1, 3])\n m = utils.create_binary_mask_from_scores(score, f=0.35)\n self.assertAllEqual(m, tf.constant([1, 1, 0, 1, 0, 1]))\n\n @parameterized.named_parameters(\n ('fraction0.5', 0.5, [[1, 0, 0], [1, 1, 0]]),\n ('fraction0.7', 0.7, [[0, 0, 0], [1, 1, 0]]),\n ('fraction0.2', 0.2, [[1, 1, 0], [1, 1, 1]]))\n def test2DScoresFractions(self, f, expected_mask):\n score = tf.constant([[0.6, 0.5, 0.1],\n [0.9, 0.7, 0.4]])\n m = utils.create_binary_mask_from_scores(score, f=f)\n self.assertAllEqual(m, tf.constant(expected_mask))\n\n @parameterized.named_parameters(\n ('nzeros_2', 2, [[1, 1, 0], [1, 1, 0]]),\n ('nzeros_0', 0, [[1, 1, 1], [1, 1, 1]]),\n ('nzeros_6', 6, [[0, 0, 0], [0, 0, 0]]))\n def test2DScoresNZeros(self, n_zeros, expected_mask):\n score = tf.constant([[0.6, 0.5, 0.1],\n [0.9, 0.7, 0.4]])\n m = utils.create_binary_mask_from_scores(score, n_zeros=n_zeros)\n self.assertAllEqual(m, tf.constant(expected_mask))\n\n def testInvalidArgs(self):\n score = tf.constant([0.1, 0.2, -0.5, 0.23, -5.1, 3])\n with self.assertRaises(AssertionError):\n utils.create_binary_mask_from_scores(score, 0)\n with self.assertRaises(AssertionError):\n utils.create_binary_mask_from_scores(score, 1)\n with self.assertRaises(AssertionError):\n utils.create_binary_mask_from_scores(score, -0.5)\n with self.assertRaises(AssertionError):\n utils.create_binary_mask_from_scores(tf.ones((3, 5)), 0)\n\n\nclass MaskAndBroadCastTest(tf.test.TestCase):\n\n def testCorrectnessNoBroadcast(self):\n vals = tf.range(1, 6)\n mask = tf.constant([0, 1, 1, 0, 0])\n res = utils.mask_and_broadcast(vals, mask)\n self.assertAllEqual(res, tf.constant([0, 2, 3, 0, 0]))\n res = utils.mask_and_broadcast(vals, mask, invert_mask=True)\n self.assertAllEqual(res, tf.constant([1, 0, 0, 4, 5]))\n\n def testCorrectnessWithBroadcast(self):\n vals = tf.range(1, 4)\n mask = tf.constant([0, 1, 1])\n res = utils.mask_and_broadcast(vals, mask, out_shape=[2, 3])\n self.assertAllEqual(res, tf.constant([[0, 2, 3], [0, 2, 3]]))\n res = utils.mask_and_broadcast(vals, mask, out_shape=[2, 3],\n invert_mask=True)\n 
self.assertAllEqual(res, tf.constant([[1, 0, 0], [1, 0, 0]]))\n\n\nclass BindGinParamsTest(tf.test.TestCase):\n\n @mock.patch('gin.bind_parameter')\n @mock.patch('gin.unlock_config')\n def testDefault(self, unlock_mock, bind_param_mock):\n c_dict = {'fun1.arg1': 1,\n 'fun2.arg2': (2, 3)}\n utils.bind_gin_params(c_dict)\n self.assertEqual(bind_param_mock.call_count, len(c_dict))\n self.assertEqual(unlock_mock.call_count, len(c_dict))\n for k, v in c_dict.items():\n bind_param_mock.assert_any_call(k, v)\n\nif __name__ == '__main__':\n tf.enable_v2_behavior()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.range", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.constant" ] ]
gritYCDA/boundaryOCDA
[ "d93f2d4ad1f41d7ec19ba2a2fc7e98ecce914ccb" ]
[ "advent/scripts/train.py" ]
[ "# --------------------------------------------------------\r\n# AdvEnt training\r\n# Copyright (c) 2019 valeo.ai\r\n#\r\n# Written by Tuan-Hung Vu\r\n# --------------------------------------------------------\r\nimport argparse\r\nimport os\r\nimport os.path as osp\r\nimport pprint\r\nimport random\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport yaml\r\nimport torch\r\nfrom torch.utils import data\r\n\r\nfrom advent.model.deeplabv2 import get_deeplab_v2\r\nfrom advent.model.deeplabv2_vgg import get_deeplab_v2_vgg\r\n\r\nfrom advent.dataset.gta5 import GTA5DataSet\r\nfrom advent.dataset.bdd import BDDdataset\r\nfrom advent.dataset.cityscapes import CityscapesDataSet\r\nfrom advent.domain_adaptation.config import cfg, cfg_from_file\r\nfrom advent.domain_adaptation.train_UDA import train_domain_adaptation\r\n\r\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nimport sys\r\nsys.path.append(\"/data2/OCDA/ADVENT\")\r\nfrom advent.utils import project_root\r\n\r\n\r\ndef get_arguments():\r\n \"\"\"\r\n Parse input arguments\r\n \"\"\"\r\n parser = argparse.ArgumentParser(description=\"Code for domain adaptation (DA) training\")\r\n parser.add_argument('--cfg', type=str, default=None,\r\n help='optional config file', )\r\n parser.add_argument(\"--random-train\", action=\"store_true\",\r\n help=\"not fixing random seed.\")\r\n parser.add_argument(\"--tensorboard\", action=\"store_true\",\r\n help=\"visualize training loss with tensorboardX.\")\r\n parser.add_argument(\"--viz-every-iter\", type=int, default=None,\r\n help=\"visualize results.\")\r\n parser.add_argument(\"--exp-suffix\", type=str, default=None,\r\n help=\"optional experiment suffix\")\r\n parser.add_argument(\"--num-workers\", type=int, default=4,\r\n help=\"the number of dataloader workers\")\r\n parser.add_argument(\"--LAMBDA_BOUNDARY\", type=float, default=0.5,\r\n help=\"cfg.TRAIN.LAMBDA_BOUNDARY\")\r\n parser.add_argument(\"--LAMBDA_DICE\", type=float, default=1,\r\n help=\"cfg.TRAIN.LAMBDA_BOUNDARY\")\r\n \"\"\"\r\n gan method settings\r\n \"\"\"\r\n parser.add_argument('--gan', type=str, default='lsgan',\r\n help='gan methods [gan, lsgan]')\r\n\r\n \"\"\"\r\n dataset manager\r\n \"\"\"\r\n parser.add_argument('--source', type=str, default='GTA',\r\n help='source dataset [GTA, SYNTHIA]')\r\n parser.add_argument('--target', type=str, default='BDD',\r\n help='target dataset [Cityscapes, BDD]')\r\n\r\n \"\"\"\r\n train opticn\r\n \"\"\"\r\n parser.add_argument('--option', type=str, default=None,\r\n help='for the many trial, option tag can be divider')\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef main():\r\n # LOAD ARGS\r\n args = get_arguments()\r\n print('Called with args:')\r\n print(args)\r\n\r\n assert args.cfg is not None, 'Missing cfg file'\r\n cfg_from_file(args.cfg)\r\n cfg.NUM_WORKERS = args.num_workers\r\n if args.option is not None:\r\n cfg.TRAIN.OPTION = args.option\r\n cfg.TRAIN.LAMBDA_BOUNDARY = args.LAMBDA_BOUNDARY\r\n cfg.TRAIN.LAMBDA_DICE = args.LAMBDA_DICE\r\n\r\n ## gan method settings\r\n cfg.GAN = args.gan\r\n if cfg.GAN == 'gan':\r\n cfg.TRAIN.LAMBDA_ADV_MAIN = 0.001 # GAN\r\n elif cfg.GAN == 'lsgan':\r\n cfg.TRAIN.LAMBDA_ADV_MAIN = 0.01 # LS-GAN\r\n else:\r\n raise NotImplementedError(f\"Not Supported gan method\")\r\n\r\n ### dataset settings\r\n cfg.SOURCE = args.source\r\n cfg.TARGET = args.target\r\n ## source config\r\n if cfg.SOURCE == 'GTA':\r\n cfg.DATA_LIST_SOURCE = str(project_root / 
'advent/dataset/gta5_list/{}.txt')\r\n cfg.DATA_DIRECTORY_SOURCE = str(project_root / 'data/GTA5')\r\n cfg.TRAIN.INPUT_SIZE_SOURCE = (1280, 720)\r\n\r\n elif cfg.SOURCE == 'SYNTHIA':\r\n raise NotImplementedError(f\"Not yet supported {cfg.SOURCE} dataset\")\r\n else:\r\n raise NotImplementedError(f\"Not yet supported {cfg.SOURCE} dataset\")\r\n\r\n ## target config\r\n if cfg.TARGET == 'Cityscapes':\r\n cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/cityscapes_list/{}.txt')\r\n cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/cityscapes')\r\n cfg.EXP_ROOT = project_root / 'experiments_G2C'\r\n cfg.EXP_ROOT_SNAPSHOT = osp.join(cfg.EXP_ROOT, 'snapshots_G2C')\r\n cfg.EXP_ROOT_LOGS = osp.join(cfg.EXP_ROOT, 'logs_G2C')\r\n cfg.TRAIN.INPUT_SIZE_TARGET = (1024, 512)\r\n cfg.TRAIN.INFO_TARGET = str(project_root / 'advent/dataset/cityscapes_list/info.json')\r\n\r\n cfg.TEST.INPUT_SIZE_TARGET = (1024, 512)\r\n cfg.TEST.OUTPUT_SIZE_TARGET = (2048, 1024)\r\n cfg.TEST.INFO_TARGET = str(project_root / 'advent/dataset/cityscapes_list/info.json')\r\n\r\n elif cfg.TARGET == 'BDD':\r\n cfg.DATA_LIST_TARGET = str(project_root / 'advent/dataset/compound_list/{}.txt')\r\n cfg.DATA_DIRECTORY_TARGET = str(project_root / 'data/bdd/Compound')\r\n cfg.EXP_ROOT = project_root / 'experiments'\r\n cfg.EXP_ROOT_SNAPSHOT = osp.join(cfg.EXP_ROOT, 'snapshots')\r\n cfg.EXP_ROOT_LOGS = osp.join(cfg.EXP_ROOT, 'logs')\r\n cfg.TRAIN.INPUT_SIZE_TARGET = (960, 540)\r\n cfg.TRAIN.INFO_TARGET = str(project_root / 'advent/dataset/compound_list/info.json')\r\n\r\n else:\r\n raise NotImplementedError(f\"Not yet supported {cfg.TARGET} dataset\")\r\n\r\n\r\n # auto-generate exp name if not specified\r\n if cfg.EXP_NAME == '':\r\n cfg.EXP_NAME = f'{cfg.SOURCE}2{cfg.TARGET}_{cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}_{cfg.TRAIN.OCDA_METHOD}'\r\n\r\n if args.exp_suffix:\r\n cfg.EXP_NAME += f'_{args.exp_suffix}'\r\n # auto-generate snapshot path if not specified\r\n if cfg.TRAIN.SNAPSHOT_DIR == '':\r\n cfg.TRAIN.SNAPSHOT_DIR = osp.join(cfg.EXP_ROOT_SNAPSHOT, cfg.EXP_NAME)\r\n os.makedirs(cfg.TRAIN.SNAPSHOT_DIR, exist_ok=True)\r\n # tensorboard\r\n if args.tensorboard:\r\n if cfg.TRAIN.TENSORBOARD_LOGDIR == '':\r\n cfg.TRAIN.TENSORBOARD_LOGDIR = osp.join(cfg.EXP_ROOT_LOGS, 'tensorboard', cfg.EXP_NAME)\r\n os.makedirs(cfg.TRAIN.TENSORBOARD_LOGDIR, exist_ok=True)\r\n if args.viz_every_iter is not None:\r\n cfg.TRAIN.TENSORBOARD_VIZRATE = args.viz_every_iter\r\n else:\r\n cfg.TRAIN.TENSORBOARD_LOGDIR = ''\r\n\r\n print('Using config:')\r\n pprint.pprint(cfg)\r\n\r\n # INIT\r\n _init_fn = None\r\n if not args.random_train:\r\n torch.manual_seed(cfg.TRAIN.RANDOM_SEED)\r\n torch.cuda.manual_seed(cfg.TRAIN.RANDOM_SEED)\r\n np.random.seed(cfg.TRAIN.RANDOM_SEED)\r\n random.seed(cfg.TRAIN.RANDOM_SEED)\r\n\r\n def _init_fn(worker_id):\r\n np.random.seed(cfg.TRAIN.RANDOM_SEED + worker_id)\r\n\r\n if os.environ.get('ADVENT_DRY_RUN', '0') == '1':\r\n return\r\n\r\n # LOAD SEGMENTATION NET\r\n if cfg.TRAIN.MODEL == 'DeepLabv2':\r\n model = get_deeplab_v2(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)\r\n saved_state_dict = torch.load(cfg.TRAIN.RESTORE_FROM)\r\n if 'DeepLab_resnet_pretrained_imagenet' in cfg.TRAIN.RESTORE_FROM:\r\n new_params = model.state_dict().copy()\r\n for i in saved_state_dict:\r\n i_parts = i.split('.')\r\n if not i_parts[1] == 'layer5':\r\n new_params['.'.join(i_parts[1:])] = saved_state_dict[i]\r\n model.load_state_dict(new_params)\r\n else:\r\n model.load_state_dict(saved_state_dict)\r\n elif 
cfg.TRAIN.MODEL == 'DeepLabv2_VGG':\r\n model = get_deeplab_v2_vgg(cfg=cfg, num_classes=cfg.NUM_CLASSES,pretrained_model=cfg.TRAIN_VGG_PRE_MODEL)\r\n\r\n if cfg.TRAIN.SELF_TRAINING:\r\n path = osp.join(cfg.EXP_ROOT_SNAPSHOT, cfg.TRAIN.RESTORE_FROM_SELF)\r\n saved_state_dict = torch.load(path)\r\n model.load_state_dict(saved_state_dict, strict=False)\r\n trg_list = cfg.DATA_LIST_TARGET_ORDER\r\n print(\"self-training model loaded: {} \".format(path))\r\n else:\r\n trg_list = cfg.DATA_LIST_TARGET\r\n else:\r\n raise NotImplementedError(f\"Not yet supported {cfg.TRAIN.MODEL}\")\r\n\r\n print(\"model: \")\r\n print(model)\r\n print('Model loaded')\r\n\r\n ######## DATALOADERS ########\r\n # GTA5: 24,966: 274,626 / 24,966 = 11 epoch\r\n\r\n # self-training : target data shuffle\r\n shuffle = cfg.TRAIN.SHUFFLE\r\n if cfg.TRAIN.SELF_TRAINING:\r\n max_iteration = None\r\n else:\r\n max_iteration = cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_SOURCE\r\n\r\n source_dataset = GTA5DataSet(root=cfg.DATA_DIRECTORY_SOURCE,\r\n list_path=cfg.DATA_LIST_SOURCE,\r\n set=cfg.TRAIN.SET_SOURCE,\r\n max_iters=max_iteration,\r\n crop_size=cfg.TRAIN.INPUT_SIZE_SOURCE,\r\n mean=cfg.TRAIN.IMG_MEAN)\r\n source_loader = data.DataLoader(source_dataset,\r\n batch_size=cfg.TRAIN.BATCH_SIZE_SOURCE,\r\n num_workers=cfg.NUM_WORKERS,\r\n shuffle=True,\r\n pin_memory=True,\r\n worker_init_fn=_init_fn)\r\n if cfg.TARGET == \"BDD\":\r\n # GTA5: 14,697: 264,546 / 14,697 = 18 epoch\r\n target_dataset = BDDdataset(root=cfg.DATA_DIRECTORY_TARGET,\r\n list_path=trg_list,\r\n set=cfg.TRAIN.SET_TARGET,\r\n info_path=cfg.TRAIN.INFO_TARGET,\r\n max_iters=max_iteration,\r\n crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,\r\n mean=cfg.TRAIN.IMG_MEAN)\r\n target_loader = data.DataLoader(target_dataset,\r\n batch_size=cfg.TRAIN.BATCH_SIZE_TARGET,\r\n num_workers=cfg.NUM_WORKERS,\r\n shuffle=shuffle,\r\n pin_memory=True,\r\n worker_init_fn=_init_fn)\r\n elif cfg.TARGET == 'Cityscapes':\r\n target_dataset = CityscapesDataSet(root=cfg.DATA_DIRECTORY_TARGET,\r\n list_path=cfg.DATA_LIST_TARGET,\r\n set=cfg.TRAIN.SET_TARGET,\r\n info_path=cfg.TRAIN.INFO_TARGET,\r\n max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_TARGET,\r\n crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,\r\n mean=cfg.TRAIN.IMG_MEAN)\r\n target_loader = data.DataLoader(target_dataset,\r\n batch_size=cfg.TRAIN.BATCH_SIZE_TARGET,\r\n num_workers=cfg.NUM_WORKERS,\r\n shuffle=True,\r\n pin_memory=True,\r\n worker_init_fn=_init_fn)\r\n else:\r\n raise NotImplementedError(f\"Not yet supported {cfg.TARGET} datasets\")\r\n\r\n with open(osp.join(cfg.TRAIN.SNAPSHOT_DIR, 'train_cfg.yml'), 'w') as yaml_file:\r\n yaml.dump(cfg, yaml_file, default_flow_style=False)\r\n\r\n # UDA TRAINING\r\n train_domain_adaptation(model, source_loader, target_loader, cfg)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "numpy.random.seed", "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader" ] ]
menefotto/Kats
[ "3fc8a3f819502d45736734eabb3601f42a6b7759" ]
[ "kats/models/ensemble/weighted_avg_ensemble.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"Ensemble models with weighted average individual models\n\nAssume we have k base models, after we make forecasts with each individual\nmodel, we learn the weights for each individual model based on corresponding\nback testing results, i.e., model with better performance should have higher\nweight.\n\"\"\"\nimport logging\nimport sys\nfrom multiprocessing import Pool, cpu_count\n\nimport numpy as np\nimport pandas as pd\nimport kats.models.model as mm\nfrom kats.consts import Params, TimeSeriesData\nfrom kats.models.ensemble import ensemble\nfrom kats.models.ensemble.ensemble import BASE_MODELS, EnsembleParams\nfrom kats.utils.backtesters import BackTesterSimple\n\nclass WeightedAvgEnsemble(ensemble.BaseEnsemble):\n \"\"\"Weighted average ensemble model class\n\n Attributes:\n data: the input time series data as in :class:`kats.consts.TimeSeriesData`\n params: the model parameter class in Kats\n \"\"\"\n\n def __init__(self, data: TimeSeriesData, params: EnsembleParams) -> None:\n self.data = data\n self.params = params\n if not isinstance(self.data.value, pd.Series):\n msg = \"Only support univariate time series, but get {type}.\".format(\n type=type(self.data.value)\n )\n logging.error(msg)\n raise ValueError(msg)\n\n def _backtester_single(\n self,\n params: Params,\n model_class,\n train_percentage : int = 80,\n test_percentage : int = 20,\n err_method : str = \"mape\",\n ) -> float:\n \"\"\"Private method to run all backtesting process\n\n Args:\n params: Kats model parameters\n model_class: Untyped. Defines type of model\n train_percentage: float. Percentage of data used for training\n test_percentage: float. 
Percentage of data used for testing\n error_method: list of strings indicating which errors to calculate\n we currently support \"mape\", \"smape\", \"mae\", \"mase\", \"mse\", \"rmse\"\n\n Returns:\n float, the backtesting error\n \"\"\"\n\n bt = BackTesterSimple(\n [err_method],\n self.data,\n params,\n train_percentage,\n test_percentage,\n model_class\n )\n bt.run_backtest()\n return bt.get_error_value(err_method)\n\n def _backtester_all(self, err_method: str = \"mape\"):\n \"\"\"Private method to run all backtesting process\n\n Args:\n error_method: list of strings indicating which errors to calculate\n we currently support \"mape\", \"smape\", \"mae\", \"mase\", \"mse\", \"rmse\"\n\n Returns:\n Dict of errors from each model\n \"\"\"\n\n num_process = min(len(BASE_MODELS.keys()), (cpu_count() - 1) // 2)\n if num_process < 1:\n num_process = 1\n pool = Pool(processes=(num_process), maxtasksperchild=1000)\n backtesters = {}\n for model in self.params.models:\n backtesters[model.model_name] = pool.apply_async(\n self._backtester_single,\n args=(model.model_params, BASE_MODELS[model.model_name.lower()]),\n kwds={\"err_method\": err_method},\n )\n pool.close()\n pool.join()\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `errors`.\n self.errors = {model: res.get() for model, res in backtesters.items()}\n original_weights = {\n model: 1 / (err + sys.float_info.epsilon)\n for model, err in self.errors.items()\n }\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `weights`.\n self.weights = {\n model: err / sum(original_weights.values())\n for model, err in original_weights.items()\n }\n return self.weights\n\n def predict(self, steps: int, **kwargs):\n \"\"\"Predict method of weighted average ensemble model\n\n Args:\n steps: the length of forecasting horizon\n\n Returns:\n forecasting results as in pd.DataFrame\n \"\"\"\n\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `freq`.\n self.freq = kwargs.get(\"freq\", \"D\")\n err_method = kwargs.get(\"err_method\", \"mape\")\n # calculate the weights\n self._backtester_all(err_method=err_method)\n\n # fit model with all available time series\n pred_dict = self._predict_all(steps, **kwargs)\n\n fcst_all = pd.concat(\n [x.fcst.reset_index(drop=True) for x in pred_dict.values()], axis=1\n )\n fcst_all.columns = pred_dict.keys()\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `fcst_weighted`.\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `weights`.\n self.fcst_weighted = fcst_all.dot(np.array(list(self.weights.values())))\n\n # create future dates\n last_date = self.data.time.max()\n dates = pd.date_range(start=last_date, periods=steps + 1, freq=self.freq)\n dates = dates[dates != last_date]\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `fcst_dates`.\n self.fcst_dates = dates.to_pydatetime()\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `dates`.\n self.dates = dates[dates != last_date]\n\n # pyre-fixme[16]: `WeightedAvgEnsemble` has no attribute `fcst_df`.\n self.fcst_df = pd.DataFrame({\"time\": self.dates, \"fcst\": self.fcst_weighted})\n\n logging.debug(\"Return forecast data: {fcst_df}\".format(fcst_df=self.fcst_df))\n return self.fcst_df\n\n def plot(self):\n \"\"\"Plot method for weighted average ensemble model\n \"\"\"\n logging.info(\"Generating chart for forecast result from Ensemble.\")\n mm.Model.plot(self.data, self.fcst_df)\n\n def __str__(self):\n \"\"\"Get default parameter search space for the weighted average ensemble model\n\n Args:\n None\n\n 
Returns:\n Model name as a string\n \"\"\"\n return \"Weighted Average Ensemble\"\n" ]
[ [ "pandas.DataFrame", "pandas.date_range" ] ]
IS2AI/trimodal_person_verification
[ "f811d97717a552a7d6a474b1cc2d7ecc416a1e19" ]
[ "models/ResNetBlocks.py" ]
[ "#! /usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\nclass SEBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=8):\n super(SEBasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.se = SELayer(planes, reduction)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.relu(out)\n out = self.bn1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nclass SEBottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=8):\n super(SEBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se = SELayer(planes * 4, reduction)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=8):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y" ]
[ [ "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
benedictflorance/keras-retinanet
[ "0d4bfe0db4fad3b15e99afdc3be4badc90ee23f9" ]
[ "keras_retinanet/bin/train.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport keras\nimport keras.preprocessing.image\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..preprocessing.kitti import KittiGenerator\nfrom ..preprocessing.open_images import OpenImagesGenerator\nfrom ..preprocessing.pascal_voc import PascalVocGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.gpu import setup_gpu\nfrom ..utils.image import random_visual_effect_generator\nfrom ..utils.keras_version import check_keras_version\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.tf_version import check_tf_version\nfrom ..utils.transform import random_transform_generator\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(backbone_retinanet, num_classes, weights, alpha, gamma, multi_gpu=0,\n freeze_backbone=False, lr=1e-5, config=None):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n anchor_params = None\n num_anchors = None\n if config and 'anchor_parameters' in config:\n anchor_params = parse_anchor_parameters(config)\n num_anchors = anchor_params.num_anchors()\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n with tf.device('/cpu:0'):\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n\n # compile model\n training_model.compile(\n loss={\n 'regression' : losses.smooth_l1(),\n 'classification': losses.focal(alpha, gamma)\n },\n optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(model, training_model, prediction_model, validation_generator, args):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n makedirs(args.tensorboard_dir)\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir = args.tensorboard_dir,\n histogram_freq = 0,\n batch_size = args.batch_size,\n write_graph = True,\n write_grads = False,\n write_images = False,\n embeddings_freq = 0,\n embeddings_layer_names = None,\n embeddings_metadata = None\n )\n\n if args.evaluation and validation_generator:\n if args.dataset_type == 'coco':\n from ..callbacks.coco import CocoEval\n\n # use prediction model for evaluation\n evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)\n else:\n evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(keras.callbacks.ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n patience = 2,\n verbose = 1,\n mode = 'auto',\n min_delta = 0.0001,\n cooldown = 0,\n min_lr = 0\n ))\n\n if 
args.tensorboard_dir:\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n 'batch_size' : args.batch_size,\n 'config' : args.config,\n 'image_min_side' : args.image_min_side,\n 'image_max_side' : args.image_max_side,\n 'no_resize' : args.no_resize,\n 'preprocess_image' : preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-.1, .1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05)\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'train',\n image_extension=args.image_extension,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'val',\n image_extension=args.image_extension,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise 
ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(parsed_args.batch_size,\n parsed_args.multi_gpu))\n\n if parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(parsed_args.multi_gpu,\n parsed_args.snapshot))\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\")\n\n if 'resnet' not in parsed_args.backbone:\n warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')\n pascal_parser.add_argument('--image-extension', help='Declares the dataset images\\' extension.', default='.jpg')\n\n kitti_parser = subparsers.add_parser('kitti')\n kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')\n\n def csv_list(string):\n return string.split(',')\n\n oid_parser = subparsers.add_parser('oid')\n oid_parser.add_argument('main_dir', help='Path to dataset directory.')\n oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')\n oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)\n oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')\n oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--snapshot', help='Resume training from a snapshot.')\n group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. 
This is the default behaviour.', action='store_const', const=True, default=True)\n group.add_argument('--weights', help='Initialize the model with weights from a file.')\n group.add_argument('--no-weights', help='Don\\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)\n parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)\n parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).', type=int)\n parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)\n parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')\n parser.add_argument('--initial-epoch', help='Epoch from which to begin the train, useful if resuming from snapshot.', type=int, default=0)\n parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=30)\n parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=2000)\n parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)\n parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \\'./snapshots\\')', default='./snapshots')\n parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='') # default='./logs') => https://github.com/tensorflow/tensorflow/pull/34870\n parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')\n parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')\n parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')\n parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n parser.add_argument('--no-resize', help='Don''t rescale the image.', action='store_true')\n parser.add_argument('--config', help='Path to a configuration parameters .ini file.')\n parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')\n parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')\n parser.add_argument('--alpha', help='Scale the focal weight with alpha.', type=float, default=0.25)\n parser.add_argument('--gamma', help='Take the power of the focal weight with gamma.', type=float, default=2.0)\n\n # Fit generator arguments\n parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')\n parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)\n parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores 
backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure keras and tensorflow are the minimum required version\n check_keras_version()\n check_tf_version()\n\n # optionally choose specific GPU\n if args.gpu is not None:\n setup_gpu(args.gpu)\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = create_generators(args, backbone.preprocess_image)\n\n # create the model\n if args.snapshot is not None:\n print('Loading model, this may take a second...')\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n if args.config and 'anchor_parameters' in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print('Creating model, this may take a second...')\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n config=args.config,\n alpha=args.alpha,\n gamma=args.gamma\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if 'vgg' in args.backbone or 'densenet' in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n validation_generator,\n args,\n )\n\n if not args.compute_val_loss:\n validation_generator = None\n\n # start training\n return training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=args.multiprocessing,\n max_queue_size=args.max_queue_size,\n validation_data=validation_generator,\n initial_epoch=args.initial_epoch\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.device" ] ]
ikazos/project11737-reprise
[ "afa6d7682d1857c42a7a32726065fcb4652034c2" ]
[ "onmt/transforms/transform.py" ]
[ "\"\"\"Base Transform class and relate utils.\"\"\"\nimport torch\nfrom onmt.utils.logging import logger\nfrom onmt.utils.misc import check_path\nfrom onmt.inputters.fields import get_vocabs\nimport os\n\n\nclass Transform(object):\n \"\"\"A Base class that every transform method should derived from.\"\"\"\n\n def __init__(self, opts):\n \"\"\"Initialize Transform by parsing `opts` and add them as attribute.\"\"\"\n self.opts = opts\n self._parse_opts()\n\n def _set_seed(self, seed):\n \"\"\"Reproducibility: Set seed for non-deterministic transform.\"\"\"\n pass\n\n def warm_up(self, vocabs=None):\n \"\"\"Procedure needed after initialize and before apply.\n\n This should be override if there exist any procedure necessary\n before `apply`, like setups based on parsed options or load models,\n etc.\n \"\"\"\n if self.opts.seed > 0:\n self._set_seed(self.opts.seed)\n\n @classmethod\n def add_options(cls, parser):\n \"\"\"Available options relate to this Transform.\"\"\"\n pass\n\n @classmethod\n def _validate_options(cls, opts):\n \"\"\"Extra checks to validate options added from `add_options`.\"\"\"\n pass\n\n @classmethod\n def get_specials(cls, opts):\n return (set(), set())\n\n def apply(self, example, is_train=False, stats=None, **kwargs):\n \"\"\"Apply transform to `example`.\n\n Args:\n example (dict): a dict of field value, ex. src, tgt;\n is_train (bool): Indicate if src/tgt is training data;\n stats (TransformStatistics): a statistic object.\n \"\"\"\n raise NotImplementedError\n\n def __getstate__(self):\n \"\"\"Pickling following for rebuild.\"\"\"\n state = {\"opts\": self.opts}\n if hasattr(self, 'vocabs'):\n state['vocabs'] = self.vocabs\n return state\n\n def _parse_opts(self):\n \"\"\"Parse opts to set/reset instance's attributes.\n\n This should be override if there are attributes other than self.opts.\n To make sure we recover from picked state.\n (This should only contain attribute assignment, other routine is\n suggest to define in `warm_up`.)\n \"\"\"\n pass\n\n def __setstate__(self, state):\n \"\"\"Reload when unpickling from save file.\"\"\"\n self.opts = state[\"opts\"]\n self._parse_opts()\n vocabs = state.get('vocabs', None)\n self.warm_up(vocabs=vocabs)\n\n def stats(self):\n \"\"\"Return statistic message.\"\"\"\n return ''\n\n def _repr_args(self):\n \"\"\"Return str represent key arguments for class.\"\"\"\n return ''\n\n def __repr__(self):\n cls_name = type(self).__name__\n cls_args = self._repr_args()\n return '{}({})'.format(cls_name, cls_args)\n\n\nclass TransformStatistics(object):\n \"\"\"Return a statistic counter for Transform.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize statistic counter.\"\"\"\n self.reset()\n\n def reset(self):\n \"\"\"Statistic counters for all transforms.\"\"\"\n self.filtered = 0\n self.words, self.subwords = 0, 0\n self.n_switchouted, self.so_total = 0, 0\n self.n_dropped, self.td_total = 0, 0\n self.n_masked, self.tm_total = 0, 0\n\n def filter_too_long(self):\n \"\"\"Update filtered sentence counter.\"\"\"\n self.filtered += 1\n\n def subword(self, subwords, words):\n \"\"\"Update subword counter.\"\"\"\n self.words += words\n self.subwords += subwords\n\n def switchout(self, n_switchout, n_total):\n \"\"\"Update switchout counter.\"\"\"\n self.n_switchouted += n_switchout\n self.so_total += n_total\n\n def token_drop(self, n_dropped, n_total):\n \"\"\"Update token drop counter.\"\"\"\n self.n_dropped += n_dropped\n self.td_total += n_total\n\n def token_mask(self, n_masked, n_total):\n \"\"\"Update token mask 
counter.\"\"\"\n self.n_masked += n_masked\n self.tm_total += n_total\n\n def report(self):\n \"\"\"Return transform statistics report and reset counter.\"\"\"\n msg = ''\n if self.filtered > 0:\n msg += f'Filtred sentence: {self.filtered} sent\\n'.format()\n if self.words > 0:\n msg += f'Subword(SP/Tokenizer): {self.words} -> {self.subwords} tok\\n' # noqa: E501\n if self.so_total > 0:\n msg += f'SwitchOut: {self.n_switchouted}/{self.so_total} tok\\n'\n if self.td_total > 0:\n msg += f'Token dropped: {self.n_dropped}/{self.td_total} tok\\n'\n if self.tm_total > 0:\n msg += f'Token masked: {self.n_masked}/{self.tm_total} tok\\n'\n self.reset()\n return msg\n\n\nclass TransformPipe(Transform):\n \"\"\"Pipeline built by a list of Transform instance.\"\"\"\n\n def __init__(self, opts, transform_list):\n \"\"\"Initialize pipeline by a list of transform instance.\"\"\"\n self.opts = None # opts is not required\n self.transforms = transform_list\n self.statistics = TransformStatistics()\n\n @classmethod\n def build_from(cls, transform_list):\n \"\"\"Return a `TransformPipe` instance build from `transform_list`.\"\"\"\n for transform in transform_list:\n assert isinstance(transform, Transform), \\\n \"transform should be a instance of Transform.\"\n transform_pipe = cls(None, transform_list)\n return transform_pipe\n\n def warm_up(self, vocabs):\n \"\"\"Warm up Pipeline by iterate over all transfroms.\"\"\"\n for transform in self.transforms:\n transform.warm_up(vocabs)\n\n @classmethod\n def get_specials(cls, opts, transforms):\n \"\"\"Return all specials introduced by `transforms`.\"\"\"\n src_specials, tgt_specials = set(), set()\n for transform in transforms:\n _src_special, _tgt_special = transform.get_specials(transform.opts)\n src_specials.update(_src_special)\n tgt_specials.update(tgt_specials)\n return (src_specials, tgt_specials)\n\n def apply(self, example, is_train=False, **kwargs):\n \"\"\"Apply transform pipe to `example`.\n\n Args:\n example (dict): a dict of field value, ex. 
src, tgt.\n\n \"\"\"\n for transform in self.transforms:\n example = transform.apply(\n example, is_train=is_train, stats=self.statistics, **kwargs)\n if example is None:\n break\n return example\n\n def __getstate__(self):\n \"\"\"Pickling following for rebuild.\"\"\"\n return (self.opts, self.transforms, self.statistics)\n\n def __setstate__(self, state):\n \"\"\"Reload when unpickling from save file.\"\"\"\n self.opts, self.transforms, self.statistics = state\n\n def stats(self):\n \"\"\"Return statistic message.\"\"\"\n return self.statistics.report()\n\n def _repr_args(self):\n \"\"\"Return str represent key arguments for class.\"\"\"\n info_args = []\n for transform in self.transforms:\n info_args.append(repr(transform))\n return ', '.join(info_args)\n\n\ndef make_transforms(opts, transforms_cls, fields):\n \"\"\"Build transforms in `transforms_cls` with vocab of `fields`.\"\"\"\n vocabs = get_vocabs(fields) if fields is not None else None\n transforms = {}\n for name, transform_cls in transforms_cls.items():\n transform_obj = transform_cls(opts)\n transform_obj.warm_up(vocabs)\n transforms[name] = transform_obj\n return transforms\n\n\ndef get_specials(opts, transforms_cls_dict):\n \"\"\"Get specials of transforms that should be registed in Vocab.\"\"\"\n all_specials = {'src': set(), 'tgt': set()}\n for name, transform_cls in transforms_cls_dict.items():\n src_specials, tgt_specials = transform_cls.get_specials(opts)\n all_specials['src'].update(src_specials)\n all_specials['tgt'].update(tgt_specials)\n if os.path.isfile(opts.src_vocab+\".special\"):\n with open(os.path.join(opts.src_vocab+\".special\"), \"r\") as file:\n all_specials['src'].update([l.strip() for l in file.readlines()])\n if os.path.isfile(opts.tgt_vocab+\".special\"):\n with open(os.path.join(opts.tgt_vocab+\".special\"), \"r\") as file:\n all_specials['tgt'].update([l.strip() for l in file.readlines()])\n logger.info(f\"Get special vocabs from Transforms: {all_specials}.\")\n return all_specials\n\n\ndef save_transforms(transforms, save_data, overwrite=True):\n \"\"\"Dump `transforms` object.\"\"\"\n transforms_path = \"{}.transforms.pt\".format(save_data)\n check_path(transforms_path, exist_ok=overwrite, log=logger.warning)\n logger.info(f\"Saving Transforms to {transforms_path}.\")\n torch.save(transforms, transforms_path)\n\n\ndef load_transforms(opts):\n \"\"\"Load dumped `transforms` object.\"\"\"\n transforms_path = \"{}.transforms.pt\".format(opts.save_data)\n transforms = torch.load(transforms_path)\n logger.info(\"Transforms loaded.\")\n return transforms\n" ]
[ [ "torch.load", "torch.save" ] ]
reedan88/ioos_qc
[ "447e78766926ef82b8081d269479193fe5361386" ]
[ "ioos_qc/qartod.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\nimport logging\nimport warnings\nfrom collections import namedtuple\nfrom numbers import Real\nfrom typing import Sequence, Tuple, Union, Dict\n\nimport numpy as np\nfrom pygc import great_distance\n\nfrom ioos_qc.utils import (\n isnan,\n isfixedlength\n)\n\nL = logging.getLogger(__name__) # noqa\n\n\nclass QartodFlags(object):\n \"\"\"Primary flags for QARTOD.\"\"\"\n\n GOOD = 1\n UNKNOWN = 2\n SUSPECT = 3\n FAIL = 4\n MISSING = 9\n\n\nFLAGS = QartodFlags # Default name for all check modules\n\n\nN = Real\nspan = namedtuple('Span', 'minv maxv')\n\n\n# Convert dates to datetime and leave datetimes alone. This is also reducing all\n# objects to second precision\ndef mapdates(dates):\n if hasattr(dates, 'dtype') and np.issubdtype(dates.dtype, np.datetime64):\n return dates.astype('datetime64[ns]')\n else:\n return np.array(dates, dtype='datetime64[ns]')\n\n\ndef qartod_compare(vectors : Sequence[Sequence[N]]\n ) -> np.ma.MaskedArray:\n \"\"\"Aggregates an array of flags by precedence into a single array.\n\n Args:\n vectors: An array of uniform length arrays representing individual flags\n\n Returns:\n A masked array of aggregated flag data.\n \"\"\"\n shapes = [v.shape[0] for v in vectors]\n # Assert that all of the vectors are the same size.\n assert all([s == shapes[0] for s in shapes])\n assert all([v.ndim == 1 for v in vectors])\n\n result = np.ma.empty(shapes[0])\n result.fill(QartodFlags.MISSING)\n\n priorities = [\n QartodFlags.MISSING,\n QartodFlags.UNKNOWN,\n QartodFlags.GOOD,\n QartodFlags.SUSPECT,\n QartodFlags.FAIL\n ]\n # For each of the priorities in order, set the resultant array to the the\n # flag where that flag exists in each of the vectors.\n for p in priorities:\n for v in vectors:\n idx = np.where(v == p)[0]\n result[idx] = p\n return result\n\n\ndef location_test(lon : Sequence[N],\n lat : Sequence[N],\n bbox : Tuple[N, N, N, N] = (-180, -90, 180, 90),\n range_max : N = None\n ) -> np.ma.core.MaskedArray:\n \"\"\"Checks that a location is within reasonable bounds.\n\n Checks that longitude and latitude are within reasonable bounds defaulting\n to lon = [-180, 180] and lat = [-90, 90]. Optionally, check for a maximum\n range parameter in great circle distance defaulting to meters which can\n also use a unit from the quantities library. 
Missing and masked data is\n flagged as UNKNOWN.\n\n Args:\n lon: Longitudes as a numeric numpy array or a list of numbers.\n lat: Latitudes as a numeric numpy array or a list of numbers.\n bbox: A length 4 tuple expressed in (minx, miny, maxx, maxy) [optional].\n range_max: Maximum allowed range expressed in geodesic curve distance (meters).\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n\n bboxnt = namedtuple('BBOX', 'minx miny maxx maxy')\n if bbox is not None:\n assert isfixedlength(bbox, 4)\n bbox = bboxnt(*bbox)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n lat = np.ma.masked_invalid(np.array(lat).astype(np.floating))\n lon = np.ma.masked_invalid(np.array(lon).astype(np.floating))\n\n if lon.shape != lat.shape:\n raise ValueError(\n 'Lon ({0.shape}) and lat ({1.shape}) are different shapes'.format(\n lon, lat\n )\n )\n\n # Save original shape\n original_shape = lon.shape\n lon = lon.flatten()\n lat = lat.flatten()\n\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(lon.size, dtype='uint8')\n\n # If either lon or lat are masked we just set the flag to MISSING\n mloc = lon.mask & lat.mask\n flag_arr[mloc] = QartodFlags.MISSING\n\n # If there is only one masked value fail the location test\n mismatch = lon.mask != lat.mask\n flag_arr[mismatch] = QartodFlags.FAIL\n\n if range_max is not None and lon.size > 1:\n # Calculating the great_distance between each point\n # Flag suspect any distance over range_max\n d = np.ma.zeros(lon.size, dtype=np.float64)\n d[1:] = great_distance(\n start_latitude=lat[:-1],\n end_latitude=lat[1:],\n start_longitude=lon[:-1],\n end_longitude=lon[1:]\n )['distance']\n flag_arr[d > range_max] = QartodFlags.SUSPECT\n\n # Ignore warnings when comparing NaN values even though they are masked\n # https://github.com/numpy/numpy/blob/master/doc/release/1.8.0-notes.rst#runtime-warnings-when-comparing-nan-numbers\n with np.errstate(invalid='ignore'):\n flag_arr[(lon < bbox.minx) | (lat < bbox.miny) |\n (lon > bbox.maxx) | (lat > bbox.maxy)] = QartodFlags.FAIL\n\n return flag_arr.reshape(original_shape)\n\n\ndef gross_range_test(inp : Sequence[N],\n fail_span : Tuple[N, N],\n suspect_span : Tuple[N, N] = None\n ) -> np.ma.core.MaskedArray:\n \"\"\"Checks that values are within reasonable range bounds.\n\n Given a 2-tuple of minimum/maximum values, flag data outside of the given\n range as FAIL data. Optionally also flag data which falls outside of a user\n defined range as SUSPECT. Missing and masked data is flagged as UNKNOWN.\n\n Args:\n inp: Input data as a numeric numpy array or a list of numbers.\n fail_span: 2-tuple range which to flag outside data as FAIL.\n suspect_span: 2-tuple range which to flag outside data as SUSPECT. 
[optional]\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n\n assert isfixedlength(fail_span, 2)\n sspan = span(*sorted(fail_span))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n inp = inp.flatten()\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(inp.size, dtype='uint8')\n\n # If the value is masked set the flag to MISSING\n flag_arr[inp.mask] = QartodFlags.MISSING\n\n if suspect_span is not None:\n assert isfixedlength(suspect_span, 2)\n uspan = span(*sorted(suspect_span))\n if uspan.minv < sspan.minv or uspan.maxv > sspan.maxv:\n raise ValueError('User span range may not exceed sensor span')\n # Flag suspect outside of user span\n with np.errstate(invalid='ignore'):\n flag_arr[(inp < uspan.minv) | (inp > uspan.maxv)] = QartodFlags.SUSPECT\n\n # Flag suspect outside of sensor span\n with np.errstate(invalid='ignore'):\n flag_arr[(inp < sspan.minv) | (inp > sspan.maxv)] = QartodFlags.FAIL\n\n return flag_arr.reshape(original_shape)\n\n\nclass ClimatologyConfig(object):\n\n mem = namedtuple('window', [\n 'tspan',\n 'vspan',\n 'zspan'\n ])\n\n def __init__(self, members=None):\n members = members or []\n self._members = members\n\n @property\n def members(self):\n return self._members\n\n def values(self, tind, zind=None):\n span = (None, None)\n for m in self._members:\n # If we are between times\n if tind > m.tspan.minv and tind <= m.tspan.maxv:\n if not isnan(zind) and not isnan(m.zspan):\n # If we are between depths\n if zind > m.zspan.minv and zind <= m.zspan.maxv:\n span = m.vspan\n elif isnan(zind) and isnan(m.zspan):\n span = m.vspan\n return span\n\n def add(self,\n tspan : Tuple[N, N],\n vspan : Tuple[N, N],\n zspan : Tuple[N, N] = None) -> None:\n\n assert isfixedlength(tspan, 2)\n tspan = mapdates(tspan)\n tspan = span(*sorted(tspan))\n\n assert isfixedlength(vspan, 2)\n vspan = span(*sorted(vspan))\n\n if zspan is not None:\n assert isfixedlength(zspan, 2)\n zspan = span(*sorted(zspan))\n\n self._members.append(\n self.mem(\n tspan,\n vspan,\n zspan\n )\n )\n\n\ndef climatology_test(config : Union[ClimatologyConfig, Sequence[Dict[str, Tuple]]],\n inp : Sequence[N],\n tinp : Sequence[N],\n zinp : Sequence[N],\n ) -> np.ma.core.MaskedArray:\n \"\"\"Checks that values are within reasonable range bounds and flags as SUSPECT.\n\n Data for which no ClimatologyConfig member exists is marked as UNKNOWN.\n\n Args:\n config: A ClimatologyConfig object or a list of dicts containing tuples\n that can be used to create a ClimatologyConfig object. 
Dict should be composed of\n keywords 'tspan' and 'vspan' as well as an optional 'zspan'\n tinp: Time data as a numpy array of dtype `datetime64`.\n vinp: Input data as a numeric numpy array or a list of numbers.\n zinp: Z (depth) data as a numeric numpy array or a list of numbers.\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n\n # Create a ClimatologyConfig object if one was not passed in\n if not isinstance(config, ClimatologyConfig):\n c = ClimatologyConfig()\n for climate_config_dict in config:\n c.add(**climate_config_dict)\n config = c\n\n tinp = mapdates(tinp)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n zinp = np.ma.masked_invalid(np.array(zinp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n\n tinp = tinp.flatten()\n inp = inp.flatten()\n zinp = zinp.flatten()\n\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(inp.size, dtype='uint8')\n\n # If the value is masked set the flag to MISSING\n flag_arr[inp.mask] = QartodFlags.MISSING\n\n for i, (tind, ind, zind) in enumerate(zip(tinp, inp, zinp)):\n minv, maxv = config.values(tind, zind)\n if minv is None or maxv is None:\n flag_arr[i] = QartodFlags.MISSING\n else:\n # Flag suspect outside of climatology span\n with np.errstate(invalid='ignore'):\n if ind < minv or ind > maxv:\n flag_arr[i] = QartodFlags.SUSPECT\n\n return flag_arr.reshape(original_shape)\n\n\ndef spike_test(inp : Sequence[N],\n suspect_threshold: N,\n fail_threshold: N\n ) -> np.ma.core.MaskedArray:\n \"\"\"Check for spikes by checking neighboring data against thresholds\n\n Determine if there is a spike at data point n-1 by subtracting\n the midpoint of n and n-2 and taking the absolute value of this\n quantity, and checking if it exceeds a low or high threshold.\n Values which do not exceed either threshold are flagged GOOD,\n values which exceed the low threshold are flagged SUSPECT,\n and values which exceed the high threshold are flagged FAIL.\n Missing and masked data is flagged as UNKNOWN.\n\n Args:\n inp: Input data as a numeric numpy array or a list of numbers.\n suspect_threshold: The SUSPECT threshold value, in observations units.\n fail_threshold: The SUSPECT threshold value, in observations units.\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n inp = inp.flatten()\n\n # Calculate the average of n-2 and n\n ref = np.zeros(inp.size, dtype=np.float64)\n ref[1:-1] = (inp[0:-2] + inp[2:]) / 2\n ref = np.ma.masked_invalid(ref)\n\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(inp.size, dtype='uint8')\n\n # Calculate the (n-1 - ref) difference\n diff = np.abs(inp - ref)\n\n # If n-1 - ref is greater than the low threshold, SUSPECT test\n with np.errstate(invalid='ignore'):\n flag_arr[diff > suspect_threshold] = QartodFlags.SUSPECT\n\n # If n-1 - ref is greater than the high threshold, FAIL test\n with np.errstate(invalid='ignore'):\n flag_arr[diff > fail_threshold] = QartodFlags.FAIL\n\n # If the value is masked or nan set the flag to MISSING\n flag_arr[diff.mask] = QartodFlags.MISSING\n\n return flag_arr.reshape(original_shape)\n\n\ndef rate_of_change_test(inp : Sequence[N],\n tinp : Sequence[N],\n threshold : float\n ) -> 
np.ma.core.MaskedArray:\n \"\"\"Checks the first order difference of a series of values to see if\n there are any values exceeding a threshold defined by the inputs.\n These are then marked as SUSPECT. It is up to the test operator\n to determine an appropriate threshold value for the absolute difference not to\n exceed. Threshold is expressed as a rate in observations units per second.\n Missing and masked data is flagged as UNKNOWN.\n\n Args:\n inp: Input data as a numeric numpy array or a list of numbers.\n tinp: Time data as a numpy array of dtype `datetime64`.\n threshold: A float value representing a rate of change over time,\n in observation units per second.\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n inp = inp.flatten()\n\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(inp.size, dtype='uint8')\n\n # calculate rate of change in units/second\n roc = np.ma.zeros(inp.size, dtype='float')\n roc[1:] = np.abs(np.diff(inp) / np.diff(tinp).astype(float))\n\n with np.errstate(invalid='ignore'):\n flag_arr[roc > threshold] = QartodFlags.SUSPECT\n\n # If the value is masked set the flag to MISSING\n flag_arr[inp.mask] = QartodFlags.MISSING\n\n return flag_arr.reshape(original_shape)\n\n\ndef flat_line_test(inp: Sequence[N],\n tinp: Sequence[N],\n suspect_threshold: int,\n fail_threshold: int,\n tolerance: N = 0\n ) -> np.ma.MaskedArray:\n \"\"\"Check for consecutively repeated values within a tolerance.\n Missing and masked data is flagged as UNKNOWN.\n More information: https://github.com/ioos/ioos_qc/pull/11\n Args:\n inp: Input data as a numeric numpy array or a list of numbers.\n tinp: Time data as a numpy array of dtype `datetime64`, or seconds as type `int`.\n suspect_threshold: The number of seconds within `tolerance` to\n allow before being flagged as SUSPECT.\n fail_threshold: The number of seconds within `tolerance` to\n allow before being flagged as FAIL.\n tolerance: The tolerance that should be exceeded between consecutive values.\n To determine if the current point `n` should be flagged, we use a rolling window, with endpoint at\n point `n`, and calculate the range of values in the window. 
If that range is less than `tolerance`,\n then the point is flagged.\n Returns:\n A masked array of flag values equal in size to that of the input.\n \"\"\"\n\n # input as numpy arr\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n inp = inp.flatten()\n\n # Start with everything as passing\n flag_arr = np.full((inp.size,), QartodFlags.GOOD)\n\n # if we have fewer than 3 points, we can't run the test, so everything passes\n if len(inp) < 3:\n return flag_arr.reshape(original_shape)\n\n # determine median time interval\n time_interval = np.median(np.diff(tinp)).astype(float)\n\n def rolling_window(a, window):\n \"\"\"\n https://rigtorp.se/2011/01/01/rolling-statistics-numpy.html\n \"\"\"\n if len(a) < window:\n return np.ma.MaskedArray(np.empty((0, window + 1)))\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window + 1)\n strides = a.strides + (a.strides[-1],)\n arr = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n return np.ma.masked_invalid(arr[:-1, :])\n\n def run_test(test_threshold, flag_value):\n # convert time thresholds to number of observations\n count = (int(test_threshold) / time_interval).astype(int)\n\n # calculate actual data ranges for each window\n window = rolling_window(inp, count)\n data_min = np.min(window, 1)\n data_max = np.max(window, 1)\n data_range = np.abs(data_max - data_min)\n\n # find data ranges that are within threshold and flag them\n test_results = np.ma.filled(data_range < tolerance, fill_value=False)\n # data points before end of first window should pass\n n_fill = count if count < len(inp) else len(inp)\n test_results = np.insert(test_results, 0, np.full((n_fill,), False))\n flag_arr[test_results] = flag_value\n\n run_test(suspect_threshold, QartodFlags.SUSPECT)\n run_test(fail_threshold, QartodFlags.FAIL)\n\n # If the value is masked set the flag to MISSING\n flag_arr[inp.mask] = QartodFlags.MISSING\n\n return flag_arr.reshape(original_shape)\n\ndef attenuated_signal_test(inp : Sequence[N],\n threshold : Tuple[N, N],\n check_type : str = 'std'\n ) -> np.ma.MaskedArray:\n \"\"\"Check for near-flat-line conditions using a range or standard deviation.\n\n Missing and masked data is flagged as UNKNOWN.\n\n Args:\n inp: Input data as a numeric numpy array or a list of numbers.\n threshold: 2-tuple representing the minimum thresholds to use for SUSPECT\n and FAIL checks. 
The smaller of the two values is used in the FAIL\n tests and the greater of the two values is used in the SUSPECT tests.\n check_type: Either 'std' (default) or 'range', depending on the type of check\n you wish to perform.\n\n Returns:\n A masked array of flag values equal in size to that of the input.\n This array will always contain only a single unique value since all\n input data is flagged together.\n \"\"\"\n\n assert isfixedlength(threshold, 2)\n threshold = span(*reversed(sorted(threshold)))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n inp = np.ma.masked_invalid(np.array(inp).astype(np.floating))\n\n # Save original shape\n original_shape = inp.shape\n inp = inp.flatten()\n\n if check_type == 'std':\n check_val = np.std(inp)\n elif check_type == 'range':\n check_val = np.ptp(inp)\n else:\n raise ValueError('Check type \"{}\" is not defined'.format(check_type))\n\n # Start with everything as passing (1)\n flag_arr = np.ma.ones(inp.size, dtype='uint8')\n\n # span(*reversed(sorted(threshold))) puts the larger value in minv and the\n # smaller in maxv, so FAIL fires below the smaller of the two thresholds\n if check_val < threshold.maxv:\n flag_arr.fill(QartodFlags.FAIL)\n elif check_val < threshold.minv:\n flag_arr.fill(QartodFlags.SUSPECT)\n\n # If the value is masked set the flag to MISSING\n flag_arr[inp.mask] = QartodFlags.MISSING\n\n return flag_arr.reshape(original_shape)\n" ]
[ [ "numpy.issubdtype", "numpy.lib.stride_tricks.as_strided", "numpy.max", "numpy.where", "numpy.full", "numpy.std", "numpy.diff", "numpy.ma.zeros", "numpy.ma.ones", "numpy.zeros", "numpy.min", "numpy.errstate", "numpy.array", "numpy.ma.filled", "numpy.abs", "numpy.ptp", "numpy.ma.empty", "numpy.ma.masked_invalid", "numpy.empty" ] ]
rproepp/spykeutils
[ "0bdae5fc6493b01bc9744a84b0c288ae49a5614d" ]
[ "spykeutils/stationarity.py" ]
[ "import scipy as sp\nimport quantities as pq\n\nfrom progress_indicator import ProgressIndicator\nfrom . import SpykeException\n\n\ndef spike_amplitude_histogram(trains, num_bins, uniform_y_scale=True,\n unit=pq.uV, progress=None):\n \"\"\" Return a spike amplitude histogram.\n\n The resulting is useful to assess the drift in spike amplitude over a\n longer recording. It shows histograms (one for each ``trains`` entry,\n e.g. segment) of maximum and minimum spike amplitudes.\n\n :param list trains: A list of lists of :class:`neo.core.SpikeTrain`\n objects. Each entry of the outer list will be one point on the\n x-axis (they could correspond to segments), all amplitude occurences\n of spikes contained in the inner list will be added up.\n :param int num_bins: Number of bins for the histograms.\n :param bool uniform_y_scale: If True, the histogram for each channel\n will use the same bins. Otherwise, the minimum bin range is computed\n separately for each channel.\n :param Quantity unit: Unit of Y-Axis.\n :param progress: Set this parameter to report progress.\n :type progress: :class:`.progress_indicator.ProgressIndicator`\n :return: A tuple with three values:\n\n * A three-dimensional histogram matrix, where the first dimension\n corresponds to bins, the second dimension to the entries of\n ``trains`` (e.g. segments) and the third dimension to channels.\n * A list of the minimum amplitude value for each channel (all values\n will be equal if ``uniform_y_scale`` is true).\n * A list of the maximum amplitude value for each channel (all values\n will be equal if ``uniform_y_scale`` is true).\n :rtype: (ndarray, list, list)\n \"\"\"\n if not progress:\n progress = ProgressIndicator()\n\n num_channels = 1\n for t in trains:\n if not t:\n continue\n num_channels = t[0].waveforms.shape[2]\n break\n\n progress.set_ticks(2*len(trains))\n progress.set_status('Calculating Spike Amplitude Histogram')\n\n # Find maximum and minimum amplitudes on all channels\n up = [0] * num_channels\n down = [0] * num_channels\n for t in trains:\n for s in t:\n if s.waveforms is None:\n continue\n if s.waveforms.shape[2] != num_channels:\n raise SpykeException('All spikes need to have the same ' +\n 'numer of channels for Spike Amplitude Histogram!')\n a = sp.asarray(s.waveforms.rescale(unit))\n u = a.max(1)\n d = a.min(1)\n for c in xrange(num_channels):\n up[c] = max(up[c], sp.stats.mstats.mquantiles(\n u[:,c], [0.999])[0])\n down[c] = min(down[c], sp.stats.mstats.mquantiles(\n d[:,c], [0.001])[0])\n progress.step()\n\n if uniform_y_scale:\n up = [max(up)] * num_channels\n down = [min(down)] * num_channels\n\n # Create histogram\n bins = [sp.linspace(down[c],up[c], num_bins+1)\n for c in xrange(num_channels)]\n hist = sp.zeros((num_bins, len(trains), num_channels))\n for i, t in enumerate(trains):\n for s in t:\n if s.waveforms is None:\n continue\n a = sp.asarray(s.waveforms.rescale(unit))\n upper = a.max(1)\n lower = a.min(1)\n for c in xrange(num_channels):\n hist[:,i,c] += sp.histogram(upper[:,c], bins[c])[0]\n hist[:,i,c] += sp.histogram(lower[:,c], bins[c])[0]\n progress.step()\n\n return hist, down, up\n" ]
[ [ "scipy.linspace", "scipy.stats.mstats.mquantiles", "scipy.histogram" ] ]
tsutterley/pointCollection
[ "04e4359e463ff8a556e0d078373578bd96390151" ]
[ "scripts/make_mosaic.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nmake_mosaic.py\nWritten by Tyler Sutterley (10/2021)\n\nCreate a weighted mosaic from a series of tiles\n\nCOMMAND LINE OPTIONS:\n --help: list the command line options\n -d X, --directory X: directory to run\n -g X, --glob_string X: quoted string to pass to glob to find the files\n -r X, --range X: valid range of tiles to read [xmin,xmax,ymin,ymax]\n -G X, --group X: input HDF5 group\n -F X, --field X: input HDF5 field map\n -p X, --pad X: pad width in meters for weights\n -f X, --feather X: feathering width in meters for weights\n -w, --weight: use a weighted summation scheme for calculating mosaic\n -S X, --spacing X: output grid spacing if creating from uniform tiles\n -c X, --crop X: crop mosaic to bounds [xmin,xmax,ymin,ymax]\n -O X, --output X: output filename\n -v, --verbose: verbose output of run\n -R, --replace: overwrite existing output files\n -s, --show: create plot of output mosaic\n -m X, --mode X: Local permissions mode of the output mosaic\n\nUPDATE HISTORY:\n Updated 10/2021: added option for using a non-weighted summation\n Updated 07/2021: added option replace for overwriting existing files\n added option for cropping output mosaic\n Updated 11/2020: added option spacing for setting output grid\n Updated 03/2020: adding argparse bug fix for negative arguments\n made output filename a command line option\n Written 03/2020\n\"\"\"\nimport os\nimport re\nimport glob\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pointCollection as pc\n\nimport sys\ndef main(argv):\n \"\"\"\n Create a weighted mosaic from a series of tiles\n \"\"\"\n\n # account for a bug in argparse that misinterprets negative arguments\n # this bug may be fixed in some versions of argparse\n # but this should keep compatibility\n for i, arg in enumerate(argv):\n if (arg[0] == '-') and arg[1].isdigit(): argv[i] = ' ' + arg\n\n parser=argparse.ArgumentParser()\n parser.add_argument('--directory','-d',\n type=os.path.expanduser, default=os.getcwd(),\n help='directory to run')\n parser.add_argument('--glob_string','-g', type=str,\n default='/*/*.h5',\n help='quoted string to pass to glob to find the files')\n parser.add_argument('--range','-r', type=float,\n nargs=4, default=[-np.inf,np.inf,-np.inf,np.inf],\n metavar=('xmin','xmax','ymin','ymax'),\n help='valid range of tiles to read')\n parser.add_argument('--in_group','-G',\n type=str, default='/',\n help='input HDF5 group')\n parser.add_argument('--out_group', type=str,\n help=\"\"\"output hdf5 group (specify \"/\" for root;\n default is the same as the input group\"\"\")\n # parser.add_argument('--field','-F', type=str,\n # nargs='+', default=['z','dz'],\n # help='input HDF5 field map')\n parser.add_argument('--fields','-F', type=str,\n nargs='+', default=['z'],\n help='input HDF5 fields')\n parser.add_argument('--pad','-p', type=float,\n default=0,\n help='pad width in meters for weights')\n parser.add_argument('--feather','-f', type=float,\n default=0,\n help='feathering width in meters for weights')\n parser.add_argument('--weight','-w',\n action=\"store_true\",\n help='use a weighted summation scheme for calculating mosaic')\n parser.add_argument('--spacing','-S', type=float,\n nargs=2, default=[None,None], metavar=('dx','dy'),\n help='output grid spacing if creating from uniform tiles')\n parser.add_argument('--crop','-c', type=float,\n nargs=4, metavar=('xmin','xmax','ymin','ymax'),\n help='crop mosaic to bounds')\n parser.add_argument('--output','-O',\n 
type=str, default='mosaic.h5',\n help='output filename')\n parser.add_argument('--verbose','-v',\n default=False, action='store_true',\n help='verbose output of run')\n parser.add_argument('--replace','-R',\n default=False, action='store_true',\n help='overwrite existing output files')\n parser.add_argument('--show','-s',\n action=\"store_true\",\n help='create plot of output mosaic')\n parser.add_argument('--mode','-m',\n type=lambda x: int(x,base=8), default=0o775,\n help='permissions mode of output mosaic')\n args=parser.parse_args()\n\n # valid range of tiles\n xmin,xmax,ymin,ymax = args.range\n # convert field mapping from list to dict\n #field_mapping = {args.field[0]:args.field[1]}\n\n if args.out_group is None:\n args.out_group=args.in_group\n\n if args.verbose:\n print(\"searching glob string:\"+\"[\"+args.glob_string+\"]\")\n # find list of valid files\n file_list = []\n for file in glob.glob(args.directory +'/'+args.glob_string):\n try:\n xc,yc=[int(item)*1.e3 for item in re.compile(r'E(.*)_N(.*).h5').search(file).groups()]\n except Exception:\n continue\n if ((xc >= xmin) and (xc <= xmax) & (yc >= ymin) and (yc <= ymax)):\n file_list.append(file)\n\n # get bounds, grid spacing and dimensions of output mosaic\n mosaic=pc.grid.mosaic(spacing=args.spacing)\n for file in file_list.copy():\n # read tile grid from HDF5\n try:\n temp=pc.grid.data().from_h5(file, group=args.in_group, fields=[])\n # update grid spacing of output mosaic\n mosaic.update_spacing(temp)\n # update the extents of the output mosaic\n mosaic.update_bounds(temp)\n # update dimensions of output mosaic with new extents\n mosaic.update_dimensions(temp)\n except Exception:\n print(f\"failed to read group {args.in_group} \"+ file)\n file_list.remove(file)\n # create output mosaic, weights, and mask\n mosaic.assign({field:np.zeros(mosaic.dimensions) for field in args.fields})\n mosaic.invalid = np.ones(mosaic.dimensions,dtype=bool)\n mosaic.weight = np.zeros((mosaic.dimensions[0],mosaic.dimensions[1]))\n field_dims={}\n\n # read data grid from a single tile HDF5\n temp=pc.grid.mosaic().from_h5(file_list[0], group=args.in_group, fields=args.fields)\n these_fields=[field for field in args.fields if field in temp.fields]\n field_dims={field:getattr(temp, field).ndim for field in these_fields}\n\n # check if using a weighted summation scheme for calculating mosaic\n if args.weight:\n # create the weights for a single file\n # as the tiles have the same dimensions, we only have to calculate\n # the first set. 
After that we can just copy the first\n temp.weights(pad=args.pad, feather=args.feather, apply=False)\n # allocate for output weight matrix\n mosaic.weight=np.zeros((mosaic.dimensions[0],mosaic.dimensions[1]))\n tile_weight=temp.weight.copy()\n # for each file in the list\n for file in file_list:\n # read data grid from HDF5\n temp=pc.grid.mosaic().from_h5(file, group=args.in_group, fields=args.fields)\n these_fields=[field for field in args.fields if field in temp.fields]\n # copy weights for tile\n temp.weight=tile_weight.copy()\n # get the image coordinates of the input file\n iy,ix = mosaic.image_coordinates(temp)\n for field in these_fields:\n field_data=np.atleast_3d(getattr(temp, field))\n try:\n for band in range(mosaic.dimensions[2]):\n getattr(mosaic, field)[iy,ix,band] += field_data[:,:,band]*temp.weight\n mosaic.invalid[iy,ix,band] = False\n except IndexError:\n print(f\"problem with field {field} in file {file}\")\n # add weights to total weight matrix\n mosaic.weight[iy,ix] += temp.weight[:,:]\n # find valid weights\n iy,ix = np.nonzero(mosaic.weight == 0)\n mosaic.invalid[iy,ix,:] = True\n # normalize fields by weights\n iy,ix = np.nonzero(mosaic.weight > 0)\n for band in range(mosaic.dimensions[2]):\n for field in mosaic.fields:\n getattr(mosaic, field)[iy,ix,band] /= mosaic.weight[iy,ix]\n else:\n # use a simple summation scheme for calculating mosaic\n # for each file in the list\n for file in file_list:\n # read data grid from HDF5\n temp=pc.grid.mosaic().from_h5(file, group=args.in_group, fields=args.fields)\n these_fields=[field for field in args.fields if field in temp.fields]\n # get the image coordinates of the input file\n iy,ix = mosaic.image_coordinates(temp)\n for field in these_fields:\n field_data=np.atleast_3d(getattr(temp, field))\n try:\n for band in range(mosaic.dimensions[2]):\n getattr(mosaic, field)[iy,ix,band] = field_data[:,:,band]\n mosaic.invalid[iy,ix,band] = False\n except IndexError:\n print(f\"problem with field {field} in file {file}\")\n\n # replace invalid points with fill_value\n for field in mosaic.fields:\n getattr(mosaic, field)[mosaic.invalid] = mosaic.fill_value\n # crop mosaic to bounds\n if np.any(args.crop):\n # x and y range (verify min and max order)\n XR = np.sort([args.crop[0],args.crop[1]])\n YR = np.sort([args.crop[2],args.crop[3]])\n mosaic.crop(XR, YR, fields=mosaic.fields)\n\n # output each field\n for field in mosaic.fields:\n if field_dims[field] == 2:\n pc.grid.data().from_dict({'x':mosaic.x,'y':mosaic.y,\\\n field:np.squeeze(getattr(mosaic,field)[:,:,0])})\\\n .to_h5(os.path.join(args.directory,args.output), \\\n group=args.out_group, replace=args.replace)\n else:\n pc.grid.data().from_dict({'x':mosaic.x,'y':mosaic.y, 't': mosaic.t,\\\n field:getattr(mosaic,field)})\\\n .to_h5(os.path.join(args.directory,args.output), \\\n group=args.out_group, replace=args.replace)\n # only want the 'replace' argument on the first field\n args.replace=False\n if args.show:\n if len(mosaic.z.shape) > 2:\n plt.imshow(mosaic.z[:,:,-1]-mosaic.z[:,:,0], extent=mosaic.extent)\n else:\n mosaic.z.show()\n plt.colorbar()\n plt.show()\n\nif __name__=='__main__':\n main(sys.argv)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.nonzero", "numpy.sort", "numpy.ones", "matplotlib.pyplot.colorbar", "numpy.any", "matplotlib.pyplot.show", "numpy.zeros" ] ]
AaronGlanville/Barry
[ "f181448b2ed10a8c08195e7e34819ceb8abfe532" ]
[ "tests/test_models.py" ]
[ "from barry.datasets.dummy import DummyCorrelationFunction_SDSS_DR12_Z061_NGC, DummyPowerSpectrum_SDSS_DR12_Z061_NGC\nfrom barry.models.model import Model\nfrom barry.models.bao_power import PowerSpectrumFit\nfrom barry.models.bao_correlation import CorrelationFunctionFit\n\nfrom tests.utils import get_concrete\nimport numpy as np\n\n\nclass TestDataset:\n classes = get_concrete(Model)\n concrete = []\n num_start = 100\n\n @classmethod\n def setup_class(cls):\n cls.pk_data = DummyPowerSpectrum_SDSS_DR12_Z061_NGC()\n cls.xi_data = DummyCorrelationFunction_SDSS_DR12_Z061_NGC()\n for c in cls.classes:\n model = c()\n if isinstance(model, PowerSpectrumFit):\n model.set_data(cls.pk_data.get_data())\n elif isinstance(model, CorrelationFunctionFit):\n model.set_data(cls.xi_data.get_data())\n cls.concrete.append(model)\n\n def test_pk_nonnan_likelihood_with_default_param_values(self):\n for c in self.concrete:\n if isinstance(c, PowerSpectrumFit):\n params = c.get_defaults()\n posterior = c.get_posterior(params)\n assert np.isfinite(posterior), f\"Model {str(c)} at params {params} gave posterior {posterior}\"\n\n def test_pk_random_starting_point_doesnt_fail(self):\n for c in self.concrete:\n if isinstance(c, PowerSpectrumFit):\n np.random.seed(0)\n for i in range(self.num_start):\n params = c.get_raw_start()\n posterior = c.get_posterior(params)\n assert np.isfinite(posterior), f\"Model {str(c)} at params {params} gave posterior {posterior}\"\n\n def test_xi_nonnan_likelihood_with_default_param_values(self):\n for c in self.concrete:\n if isinstance(c, CorrelationFunctionFit):\n params = c.get_defaults()\n posterior = c.get_posterior(params)\n assert np.isfinite(posterior), f\"Model {str(c)} at params {params} gave posterior {posterior}\"\n\n def test_xi_random_starting_point_doesnt_fail(self):\n for c in self.concrete:\n if isinstance(c, CorrelationFunctionFit):\n np.random.seed(0)\n for i in range(self.num_start):\n params = c.get_raw_start()\n posterior = c.get_posterior(params)\n assert np.isfinite(posterior), f\"Model {str(c)} at params {params} gave posterior {posterior}\"\n" ]
[ [ "numpy.random.seed", "numpy.isfinite" ] ]
irecsys/DeepCARSKit
[ "20b861728efa0b416075d2e26c102c509923848e", "20b861728efa0b416075d2e26c102c509923848e" ]
[ "deepcarskit/model/fms/fm.py", "deepcarskit/model/neucf/neucmfw0.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2020/7/8 10:09\n# @Author : Shanlei Mu\n# @Email : [email protected]\n# @File : fms.py\n\n# UPDATE:\n# @Time : 2020/8/13,\n# @Author : Zihan Lin\n# @Email : [email protected]\n\n# UPDATE:\n# @Time : 2021/12\n# @Author : Yong Zheng\n# @Notes : made changes to adapt it for CARS\n\nr\"\"\"\nFM\n################################################\nReferences\n-----\nSteffen Rendle et al. \"Factorization Machines.\" in ICDM 2010.\n\nNotes\n-----\ncontext variables are treated as individual dimensions\n\"\"\"\n\nimport torch.nn as nn\nfrom torch.nn.init import xavier_normal_\n\nfrom deepcarskit.model.context_recommender import ContextRecommender\nfrom recbole.model.layers import BaseFactorizationMachine\nfrom recbole.utils import EvaluatorType\n\n\nclass FM(ContextRecommender):\n \"\"\"Factorization Machine considers the second-order interaction with features to predict the final score.\n\n \"\"\"\n\n def __init__(self, config, dataset):\n\n super(FM, self).__init__(config, dataset)\n\n # define layers and loss\n self.fm = BaseFactorizationMachine(reduce_sum=True)\n self.config = config\n\n if self.config['eval_type'] == EvaluatorType.RANKING:\n self.actfun = nn.Sigmoid()\n self.loss = nn.BCELoss()\n self.LABEL = self.config['LABEL_FIELD']\n else:\n self.actfun = nn.LeakyReLU()\n self.loss = nn.MSELoss()\n self.LABEL = self.config['RATING_FIELD']\n\n # parameters initialization\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n if isinstance(module, nn.Embedding):\n xavier_normal_(module.weight.data)\n\n def forward(self, interaction):\n fm_all_embeddings = self.concat_embed_input_fields(interaction) # [batch_size, num_field, embed_dim]\n y = self.actfun(self.first_order_linear(interaction) + self.fm(fm_all_embeddings))\n return y.squeeze(-1)\n\n def calculate_loss(self, interaction):\n label = interaction[self.LABEL]\n\n output = self.forward(interaction)\n return self.loss(output, label)\n\n def predict(self, interaction):\n return self.forward(interaction)\n", "# -*- coding: utf-8 -*-\n# @Time : 2022\n# @Author : Yong Zheng\n\n\n\nr\"\"\"\nNeuCMFw0\n################################################\nReferences\n-----\nYong Zheng, Gonzalo Florez Arias. \"A Family of Neural Contextual Matrix Factorization Models for Context-Aware Recommendations\", ACM UMAP, 2022\n\nNotes\n-----\n1). NeuCMFw0 has 4 towers: MLP tower without contexts, MF tower with UI, MF with UC, MF with IC\n\n2). 
w => we consider context situation as a whole/single dimension and create embedding for it, when we fuse them into the MF towers\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import normal_\n\nfrom deepcarskit.model.context_recommender import ContextRecommender\nfrom recbole.model.layers import MLPLayers\nfrom recbole.utils import InputType, EvaluatorType\n\n\nclass NeuCMFw0(ContextRecommender):\n\n input_type = InputType.POINTWISE\n\n def __init__(self, config, dataset):\n super(NeuCMFw0, self).__init__(config, dataset)\n\n # load parameters info\n self.mf_embedding_size = config['mf_embedding_size']\n self.mlp_embedding_size = config['mlp_embedding_size']\n self.mlp_hidden_size = config['mlp_hidden_size']\n self.dropout_prob = config['dropout_prob']\n self.mf_train = config['mf_train']\n self.mlp_train = config['mlp_train']\n self.use_pretrain = config['use_pretrain']\n self.mf_pretrain_path = config['mf_pretrain_path']\n self.mlp_pretrain_path = config['mlp_pretrain_path']\n\n # define layers and loss\n self.user_mf_embedding = nn.Embedding(self.n_users, self.mf_embedding_size)\n self.item_mf_embedding = nn.Embedding(self.n_items, self.mf_embedding_size)\n self.context_situation_mf_embedding = nn.Embedding(self.n_context_situation, self.mf_embedding_size)\n self.user_mlp_embedding = nn.Embedding(self.n_users, self.mlp_embedding_size)\n self.item_mlp_embedding = nn.Embedding(self.n_items, self.mlp_embedding_size)\n self.context_situation_mlp_embedding = nn.Embedding(self.n_context_situation, self.mlp_embedding_size)\n\n # mlp layers = user, item\n self.mlp_layers = MLPLayers([2 * self.mlp_embedding_size] + self.mlp_hidden_size, self.dropout_prob)\n self.mlp_layers.logger = None # remove logger to use torch.save()\n if self.mf_train and self.mlp_train:\n self.predict_layer = nn.Linear(3 * self.mf_embedding_size + self.mlp_hidden_size[-1], 1)\n elif self.mf_train:\n self.predict_layer = nn.Linear(3 * self.mf_embedding_size, 1)\n elif self.mlp_train:\n self.predict_layer = nn.Linear(self.mlp_hidden_size[-1], 1)\n\n # parameters initialization\n if self.use_pretrain:\n self.load_pretrain()\n else:\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n if isinstance(module, nn.Embedding):\n normal_(module.weight.data, mean=0.0, std=0.01)\n\n def forward(self, user, item, context_situation):\n user_mf_e = self.user_mf_embedding(user)\n item_mf_e = self.item_mf_embedding(item)\n context_situation_mf_e = self.context_situation_mf_embedding(context_situation)\n user_mlp_e = self.user_mlp_embedding(user)\n item_mlp_e = self.item_mlp_embedding(item)\n if self.mf_train:\n mf_ui_output = torch.mul(user_mf_e, item_mf_e) # [batch_size, embedding_size]\n mf_uc_output = torch.mul(user_mf_e, context_situation_mf_e) # [batch_size, embedding_size]\n mf_ic_output = torch.mul(item_mf_e, context_situation_mf_e) # [batch_size, embedding_size]\n if self.mlp_train:\n mlp_output = self.mlp_layers(torch.cat((user_mlp_e, item_mlp_e), -1)) # [batch_size, layers[-1]]\n\n if self.mf_train and self.mlp_train:\n output = self.actfun(self.predict_layer(torch.cat((mf_ui_output, mf_uc_output, mf_ic_output, mlp_output), -1)))\n elif self.mf_train:\n output = self.actfun(self.predict_layer(torch.cat((mf_ui_output, mf_uc_output, mf_ic_output), -1)))\n elif self.mlp_train:\n output = self.actfun(self.predict_layer(mlp_output))\n else:\n raise RuntimeError('mf_train and mlp_train can not be False at the same time')\n return output.squeeze(-1)\n\n def calculate_loss(self, interaction):\n user 
= interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n context_situation = interaction[self.CONTEXT_SITUATION_ID]\n label = interaction[self.LABEL]\n\n output = self.forward(user, item, context_situation)\n return self.loss(output, label)\n\n def predict(self, interaction):\n user = interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n context_situation = interaction[self.CONTEXT_SITUATION_ID]\n return self.forward(user, item, context_situation)\n\n def dump_parameters(self):\n r\"\"\"A simple implementation of dumping model parameters for pretrain.\n\n \"\"\"\n if self.mf_train and not self.mlp_train:\n save_path = self.mf_pretrain_path\n torch.save(self, save_path)\n elif self.mlp_train and not self.mf_train:\n save_path = self.mlp_pretrain_path\n torch.save(self, save_path)\n" ]
[ [ "torch.nn.init.xavier_normal_", "torch.nn.BCELoss", "torch.nn.Sigmoid", "torch.nn.LeakyReLU", "torch.nn.MSELoss" ], [ "torch.cat", "torch.nn.Embedding", "torch.nn.Linear", "torch.mul", "torch.nn.init.normal_", "torch.save" ] ]
sherry0219/ignite
[ "e96203f05a5d2da9226169fbab13d56ece675e41" ]
[ "ignite/contrib/metrics/average_precision.py" ]
[ "from ignite.metrics import EpochMetric\n\n\ndef average_precision_compute_fn(y_preds, y_targets):\n try:\n from sklearn.metrics import average_precision_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n y_pred = y_preds.numpy()\n return average_precision_score(y_true, y_pred)\n\n\nclass AveragePrecision(EpochMetric):\n \"\"\"Computes Average Precision accumulating predictions and the ground-truth during an epoch\n and applying `sklearn.metrics.average_precision_score <http://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .\n\n Args:\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def activated_output_transform(output):\n y_pred, y = output\n y_pred = torch.softmax(y_pred)\n return y_pred, y\n\n avg_precision = AveragePrecision(activated_output_transform)\n\n \"\"\"\n def __init__(self, activation=None, output_transform=lambda x: x):\n super(AveragePrecision, self).__init__(average_precision_compute_fn, output_transform=output_transform)\n" ]
[ [ "sklearn.metrics.average_precision_score" ] ]
mikigom/dual-hrnet
[ "123ee7bd8287ae16c944ff36d972f634dd652cc6" ]
[ "utils.py" ]
[ "import os\nimport math\nimport random\nimport errno\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n\nclass AverageMeter(object):\n def __init__(self):\n self.val = None\n self.sum = None\n self.cnt = None\n self.avg = None\n self.ema = None\n self.initialized = False\n\n def update(self, val, n=1):\n if not self.initialized:\n self.initialize(val, n)\n else:\n self.add(val, n)\n\n def initialize(self, val, n):\n self.val = val\n self.sum = val * n\n self.cnt = n\n self.avg = val\n self.ema = val\n self.initialized = True\n\n def add(self, val, n):\n self.val = val\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n self.ema = self.ema * 0.99 + self.val * 0.01\n\n\ndef inter_and_union(pred, mask, num_class):\n pred = np.asarray(pred, dtype=np.uint8).copy()\n mask = np.asarray(mask, dtype=np.uint8).copy()\n\n # 255 -> 0\n pred += 1\n mask += 1\n pred = pred * (mask > 0)\n\n inter = pred * (pred == mask)\n (area_inter, _) = np.histogram(inter, bins=num_class, range=(1, num_class))\n (area_pred, _) = np.histogram(pred, bins=num_class, range=(1, num_class))\n (area_mask, _) = np.histogram(mask, bins=num_class, range=(1, num_class))\n area_union = area_pred + area_mask - area_inter\n\n return area_inter, area_union\n\n\ndef preprocess(image1, image2, mask, flip=False, scale=False, crop=False):\n if isinstance(image1, np.ndarray):\n image1 = Image.fromarray(image1)\n if isinstance(image2, np.ndarray):\n image2 = Image.fromarray(image2)\n if isinstance(mask, np.ndarray):\n mask = Image.fromarray(mask)\n if flip:\n if random.random() < 0.5:\n image1 = image1.transpose(Image.FLIP_LEFT_RIGHT)\n image2 = image2.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n if random.random() < 0.5:\n image1 = image1.transpose(Image.FLIP_TOP_BOTTOM)\n image2 = image2.transpose(Image.FLIP_TOP_BOTTOM)\n mask = mask.transpose(Image.FLIP_TOP_BOTTOM)\n\n if random.random() < 0.5:\n image1 = image1.transpose(Image.ROTATE_90)\n image2 = image2.transpose(Image.ROTATE_90)\n mask = mask.transpose(Image.ROTATE_90)\n\n if scale:\n w, h = image1.size\n rand_log_scale = math.log(scale[0], 2) + random.random() * (math.log(scale[1], 2) - math.log(scale[0], 2))\n random_scale = math.pow(2, rand_log_scale)\n new_size = (int(round(w * random_scale)), int(round(h * random_scale)))\n image1 = image1.resize(new_size, Image.ANTIALIAS)\n image2 = image2.resize(new_size, Image.ANTIALIAS)\n mask = mask.resize(new_size, Image.NEAREST)\n\n transform_list = []\n transform_list.append(transforms.ToTensor())\n transform_list.append(transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))\n data_transforms = transforms.Compose(transform_list)\n\n image1 = data_transforms(image1)\n image2 = data_transforms(image2)\n mask = torch.LongTensor(np.array(mask).astype(np.int64))\n\n if crop:\n h, w = image1.shape[1], image1.shape[2]\n pad_tb = max(0, crop[0] - h)\n pad_lr = max(0, crop[1] - w)\n image1 = torch.nn.ZeroPad2d((0, pad_lr, 0, pad_tb))(image1)\n image2 = torch.nn.ZeroPad2d((0, pad_lr, 0, pad_tb))(image2)\n mask = torch.nn.ConstantPad2d((0, pad_lr, 0, pad_tb), 255)(mask)\n\n h, w = image1.shape[1], image1.shape[2]\n i = random.randint(0, h - crop[0])\n j = random.randint(0, w - crop[1])\n image1 = image1[:, i:i + crop[0], j:j + crop[1]]\n image2 = image2[:, i:i + crop[0], j:j + crop[1]]\n mask = mask[i:i + crop[0], j:j + crop[1]]\n\n return image1, image2, 
mask\n\n\nclass OhemCrossEntropy(nn.Module):\n def __init__(self, ignore_label=-1, thres=0.7,\n min_kept=100000, weight=None):\n super(OhemCrossEntropy, self).__init__()\n self.thresh = thres\n self.min_kept = max(1, min_kept)\n self.ignore_label = ignore_label\n self.criterion = nn.CrossEntropyLoss(weight=weight,\n ignore_index=ignore_label,\n reduction='none')\n\n def forward(self, score, target, **kwargs):\n ph, pw = score.size(2), score.size(3)\n h, w = target.size(1), target.size(2)\n if ph != h or pw != w:\n score = F.upsample(input=score, size=(h, w), mode='bilinear')\n pred = F.softmax(score, dim=1)\n pixel_losses = self.criterion(score, target).contiguous().view(-1)\n mask = target.contiguous().view(-1) != self.ignore_label\n\n tmp_target = target.clone()\n tmp_target[tmp_target == self.ignore_label] = 0\n pred = pred.gather(1, tmp_target.unsqueeze(1))\n pred, ind = pred.contiguous().view(-1, )[mask].contiguous().sort()\n min_value = pred[min(self.min_kept, pred.numel() - 1)]\n threshold = max(min_value, self.thresh)\n\n pixel_losses = pixel_losses[mask][ind]\n pixel_losses = pixel_losses[pred < threshold]\n return pixel_losses.mean()\n\n\ndef adjust_learning_rate(optimizer, base_lr, max_iters, cur_iters, power=0.9):\n lr = base_lr * ((1 - float(cur_iters) / max_iters) ** power)\n optimizer.param_groups[0]['lr'] = lr\n return lr\n\n\nclass CRF_Refiner(object):\n def __init__(self, shape):\n self.dcrf = __import__('pydensecrf.densecrf')\n\n self.d = self.dcrf.DenseCRF(shape[0], shape[1], 5)\n\n def __call__(self, softmax, image):\n \"\"\"\n :param softmax: [C, H, W]\n :param image: [H, W, 3]\n :return:\n \"\"\"\n # The input should be the negative of the logarithm of probability values\n # Look up the definition of the softmax_to_unary for more information\n unary = self.dcrf.utils.softmax_to_unary(softmax)\n\n # The inputs should be C-continious -- we are using Cython wrapper\n unary = np.ascontiguousarray(unary)\n self.d.setUnaryEnergy(unary)\n\n # This potential penalizes small pieces of segmentation that are\n # spatially isolated -- enforces more spatially consistent segmentations\n feats = self.dcrf.utils.create_pairwise_gaussian(sdims=(10, 10), shape=image.shape[:2])\n\n self.d.addPairwiseEnergy(feats, compat=3,\n kernel=self.dcrf.DIAG_KERNEL,\n normalization=self.dcrf.NORMALIZE_SYMMETRIC)\n\n # This creates the color-dependent features --\n # because the segmentation that we get from CNN are too coarse\n # and we can use local color features to refine them\n feats = self.dcrf.utils.create_pairwise_bilateral(sdims=(50, 50), schan=(20, 20, 20), img=image, chdim=2)\n\n self.d.addPairwiseEnergy(feats, compat=10,\n kernel=self.dcrf.DIAG_KERNEL,\n normalization=self.dcrf.NORMALIZE_SYMMETRIC)\n Q = self.d.inference(5)\n res = np.argmax(Q, axis=0).reshape((image.shape[0], image.shape[1]))\n return res\n\n\ndef safe_mkdir(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n" ]
[ [ "torch.nn.functional.upsample", "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "numpy.ascontiguousarray", "numpy.asarray", "torch.nn.ConstantPad2d", "numpy.argmax", "torch.nn.ZeroPad2d", "numpy.array", "numpy.histogram" ] ]
arpitran/HackerRank_solutions
[ "a3a77c858edd3955ea38530916db9051b1aa93f9" ]
[ "Python/Eye and Identity/solution.py" ]
[ "#!/bin/python3\n\nimport numpy as np\n\nn,m = map(int,input(\"Enter shape of your array \").split())\n\nprint(str(np.eye(n,m,k=0)).replace('1',' 1').replace('0',' 0'))\n\n" ]
[ [ "numpy.eye" ] ]
AndreuxMath/kymatio
[ "0073d325ee508ac2236a992d9c2e29190d40595c" ]
[ "kymatio/scattering1d/backend/backend_torch.py" ]
[ "# Authors: Edouard Oyallon, Joakim Anden, Mathieu Andreux\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Function\n\nNAME = 'torch'\n\ndef is_complex(input):\n return input.size(-1) == 2\n\nclass ModulusStable(Function):\n \"\"\"Stable complex modulus\n\n This class implements a modulus transform for complex numbers which is\n stable with respect to very small inputs (z close to 0), avoiding\n returning nans in all cases.\n\n Usage\n -----\n modulus = ModulusStable.apply # apply inherited from Function\n x_mod = modulus(x)\n\n Parameters\n ---------\n x : tensor\n The complex tensor (i.e., whose last dimension is two) whose modulus\n we want to compute.\n\n Returns\n -------\n output : tensor\n A tensor of same size as the input tensor, except for the last\n dimension, which is removed. This tensor is differentiable with respect\n to the input in a stable fashion (so gradent of the modulus at zero is\n zero).\n \"\"\"\n\n @staticmethod\n def forward(ctx, x):\n \"\"\"Forward pass of the modulus.\n\n This is a static method which does not require an instantiation of the\n class.\n\n Arguments\n ---------\n ctx : context object\n Collected during the forward pass. These are automatically added\n by PyTorch and should not be touched. They are then used for the\n backward pass.\n x : tensor\n The complex tensor whose modulus is to be computed.\n\n Returns\n -------\n output : tensor\n This contains the modulus computed along the last axis, with that\n axis removed.\n \"\"\"\n ctx.p = 2\n ctx.dim = -1\n ctx.keepdim = False\n\n output = (x[...,0]*x[...,0] + x[...,1]*x[...,1]).sqrt()\n\n ctx.save_for_backward(x, output)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"Backward pass of the modulus\n\n This is a static method which does not require an instantiation of the\n class.\n\n Arguments\n ---------\n ctx : context object\n Collected during the forward pass. These are automatically added\n by PyTorch and should not be touched. 
They are then used for the\n backward pass.\n grad_output : tensor\n The gradient with respect to the output tensor computed at the\n forward pass.\n\n Returns\n -------\n grad_input : tensor\n The gradient with respect to the input.\n \"\"\"\n x, output = ctx.saved_tensors\n if ctx.dim is not None and ctx.keepdim is False and x.dim() != 1:\n grad_output = grad_output.unsqueeze(ctx.dim)\n output = output.unsqueeze(ctx.dim)\n\n grad_input = x.mul(grad_output).div(output)\n\n # Special case at 0 where we return a subgradient containing 0\n grad_input.masked_fill_(output == 0, 0)\n\n return grad_input\n\n# shortcut for ModulusStable.apply\nmodulus = ModulusStable.apply\n\ndef modulus_complex(x):\n \"\"\"Compute the complex modulus\n\n Computes the modulus of x and stores the result in a complex tensor of the\n same size, with the real part equal to the modulus and the imaginary part\n equal to zero.\n\n Parameters\n ----------\n x : tensor\n A complex tensor (that is, whose last dimension is equal to 2).\n\n Returns\n -------\n res : tensor\n A tensor with the same dimensions as x, such that res[..., 0] contains\n the complex modulus of x, while res[..., 1] = 0.\n \"\"\"\n if not is_complex(x):\n raise TypeError('The input should be complex.')\n\n norm = modulus(x)\n\n res = torch.zeros_like(x)\n res[...,0] = norm\n\n return res\n\ndef subsample_fourier(x, k):\n \"\"\"Subsampling in the Fourier domain\n\n Subsampling in the temporal domain amounts to periodization in the Fourier\n domain, so the input is periodized according to the subsampling factor.\n\n Parameters\n ----------\n x : tensor\n Input tensor with at least 3 dimensions, where the next to last\n corresponds to the frequency index in the standard PyTorch FFT\n ordering. The length of this dimension should be a power of 2 to\n avoid errors. The last dimension should represent the real and\n imaginary parts of the Fourier transform.\n k : int\n The subsampling factor.\n\n Returns\n -------\n res : tensor\n The input tensor periodized along the next to last axis to yield a\n tensor of size x.shape[-2] // k along that dimension.\n \"\"\"\n if not is_complex(x):\n raise TypeError('The input should be complex.')\n\n N = x.shape[-2]\n res = x.view(x.shape[:-2] + (k, N // k, 2)).mean(dim=-3)\n return res\n\ndef pad_1d(x, pad_left, pad_right, mode='constant', value=0.):\n \"\"\"Pad real 1D tensors\n\n 1D implementation of the padding function for real PyTorch tensors.\n\n Parameters\n ----------\n x : tensor\n Three-dimensional input tensor with the third axis being the one to\n be padded.\n pad_left : int\n Amount to add on the left of the tensor (at the beginning of the\n temporal axis).\n pad_right : int\n amount to add on the right of the tensor (at the end of the temporal\n axis).\n mode : string, optional\n Padding mode. Options include 'constant' and 'reflect'. See the\n PyTorch API for other options. Defaults to 'constant'.\n value : float, optional\n If mode == 'constant', value to input within the padding. 
Defaults to\n 0.\n\n Returns\n -------\n res : tensor\n The tensor padded along the third dimension.\n \"\"\"\n if (pad_left >= x.shape[-1]) or (pad_right >= x.shape[-1]):\n if mode == 'reflect':\n raise ValueError('Indefinite padding size (larger than tensor).')\n res = F.pad(x.unsqueeze(2),\n (pad_left, pad_right, 0, 0),\n mode=mode, value=value).squeeze(2)\n return res\n\ndef pad(x, pad_left=0, pad_right=0, to_complex=True):\n \"\"\"Pad real 1D tensors and map to complex\n\n Padding which allows to simultaneously pad in a reflection fashion and map\n to complex if necessary.\n\n Parameters\n ----------\n x : tensor\n Three-dimensional input tensor with the third axis being the one to\n be padded.\n pad_left : int\n Amount to add on the left of the tensor (at the beginning of the\n temporal axis).\n pad_right : int\n amount to add on the right of the tensor (at the end of the temporal\n axis).\n to_complex : boolean, optional\n Whether to map the resulting padded tensor to a complex type (seen\n as a real number). Defaults to True.\n\n Returns\n -------\n output : tensor\n A padded signal, possibly transformed into a four-dimensional tensor\n with the last axis of size 2 if to_complex is True (this axis\n corresponds to the real and imaginary parts).\n \"\"\"\n output = pad_1d(x, pad_left, pad_right, mode='reflect')\n if to_complex:\n output = torch.stack((output, torch.zeros_like(output)), dim=-1)\n return output\n\ndef unpad(x, i0, i1):\n \"\"\"Unpad real 1D tensor\n\n Slices the input tensor at indices between i0 and i1 along the last axis.\n\n Parameters\n ----------\n x : tensor\n Input tensor with at least one axis.\n i0 : int\n Start of original signal before padding.\n i1 : int\n End of original signal before padding.\n\n Returns\n -------\n x_unpadded : tensor\n The tensor x[..., i0:i1].\n \"\"\"\n return x[..., i0:i1]\n\ndef real(x):\n \"\"\"Real part of complex tensor\n\n Takes the real part of a complex tensor, where the last axis corresponds\n to the real and imaginary parts.\n\n Parameters\n ----------\n x : tensor\n A complex tensor (that is, whose last dimension is equal to 2).\n\n Returns\n -------\n x_real : tensor\n The tensor x[..., 0] which is interpreted as the real part of x.\n \"\"\"\n return x[..., 0]\n\ndef fft1d_c2c(x):\n \"\"\"Compute the 1D FFT of a complex signal\n\n Input\n -----\n x : tensor\n A tensor of size (..., T, 2), where x[..., 0] is the real part and\n x[..., 1] is the imaginary part.\n\n Returns\n -------\n x_f : tensor\n A tensor of the same size as x containing its Fourier transform in the\n standard PyTorch FFT ordering.\n \"\"\"\n return torch.fft(x, signal_ndim=1)\n\ndef ifft1d_c2c(x):\n \"\"\"Compute the normalized 1D inverse FFT of a complex signal\n\n Input\n -----\n x_f : tensor\n A tensor of size (..., T, 2), where x_f[..., 0] is the real part and\n x[..., 1] is the imaginary part. The frequencies are assumed to be in\n the standard PyTorch FFT ordering.\n\n Returns\n -------\n x : tensor\n A tensor of the same size as x_f containing the normalized inverse\n Fourier transform of x_f.\n \"\"\"\n return torch.ifft(x, signal_ndim=1)\n" ]
[ [ "torch.ifft", "torch.zeros_like", "torch.fft" ] ]
sh4dw/shapley
[ "fa6fd9320b2b854f9899824664ae480291e776d0" ]
[ "performance_test.py" ]
[ "\"\"\"\nPerformance testing of the Shapley module based on processing times\n\n\n\"\"\"\n\nfrom modules import shapley\nfrom pprint import pprint\nfrom pandas import pandas\nfrom time import time\n\nDATASET_SIMPLE = pandas.read_csv('./test_datasets/simple.csv', sep=',')\nDATASET_MEDIUM = pandas.read_csv('./test_datasets/medium.csv', sep=',')\nDATASET_BIG = pandas.read_csv('./test_datasets/big.csv', sep=',')\n\nITERATIONS = 100\nTIMING_PRECISION = 3\n\n\ndef run_sequences(dataset, sequence_identifier):\n \"\"\"\n Run a sequence of executions and capture the average processing time\n\n Arguments:\n dataset {dict} -- [The target dataset]\n sequence_identifier {string} -- [Any name]\n\n Returns:\n [float] -- [the average processing time]\n \"\"\"\n\n test_times = []\n for i in range(0, ITERATIONS):\n T_START = time()\n s = shapley.Shapley(\n coalition_values=dataset.set_index(\n \"channels\").to_dict()[\"conversions\"]\n )\n s.run()\n test_times.append((time()-T_START))\n\n average_processing_time = round(\n _avg_time(test_times)*1000, TIMING_PRECISION\n )\n\n print(\n f\"{sequence_identifier}, average processing time [n={ITERATIONS}]: {average_processing_time} ms\"\n )\n\n return average_processing_time\n\n\ndef _avg_time(test_times):\n \"\"\"\n Capture the average time from a set of times\n\n Arguments:\n test_times {list} -- [A list of times recorded]\n\n Returns:\n [float] -- [The average time]\n \"\"\"\n\n _sum = 0\n for t in test_times:\n _sum += t\n return _sum / len(test_times)\n\n\nif __name__ == \"__main__\":\n\n average_time_simple = run_sequences(DATASET_SIMPLE, 'Simple Dataset')\n\n average_time_medium = run_sequences(DATASET_MEDIUM, 'Medium Dataset')\n\n average_time_big = run_sequences(DATASET_BIG, 'Big Dataset')\n" ]
[ [ "pandas.pandas.read_csv" ] ]
nickcafferry/Advanced_Materials_Engineer
[ "0edf63a57761c4b867f0fced543fecfaf0913440" ]
[ "doc/model/utils.py" ]
[ "import os\nimport logging\n\nfrom sklearn.model_selection import KFold\n\ndef set_logger(model_dir, log_name):\n '''Set logger to write info to terminal and save in a file.\n\n Args:\n model_dir: (string) path to store the log file\n\n Returns:\n None\n '''\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n #Don't create redundant handlers everytime set_logger is called\n if not logger.handlers:\n\n #File handler with debug level stored in model_dir/generation.log\n fh = logging.FileHandler(os.path.join(model_dir, log_name))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s: %(message)s'))\n logger.addHandler(fh)\n\n #Stream handler with info level written to terminal\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(sh)\n \n return logger\n\ndef train_test_generator(X, y, n_splits):\n '''\n Create a generator to return next train and test data split when called\n\n Args:\n X: (np.array) of dimension [num_samples x features]\n y: (np.array) of dimension [num_samples x target groups]\n n_splits: (int) Number of cross validation folds\n \n Returns:\n (X_train, y_train): (tuple) of np.arrays containing single fold of train data\n (X_test, y_test): (tuple) of np.arrays containing single fold of test data\n '''\n\n kfold = KFold(n_splits=5,shuffle=True,random_state=4)\n\n for train_index, val_index in kfold.split(X, y):\n X_train, X_val = X[train_index], X[val_index]\n y_train, y_val = y[train_index], y[val_index]\n\n yield (X_train, y_train), (X_val, y_val)\n\n\n " ]
[ [ "sklearn.model_selection.KFold" ] ]
CalebFenton/TensorflowTTS
[ "5b21d0459f2ca9d93713d7496d044a8728fc9d8a" ]
[ "tensorflow_tts/bin/preprocess.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright 2020 Minh Nguyen (@dathudeptrai)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Perform preprocessing, raw feature extraction and train/valid split.\"\"\"\n\nimport argparse\nimport logging\nimport os\n\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\nimport librosa\nimport numpy as np\nimport yaml\nimport pyworld as pw\n\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\nfrom tensorflow_tts.processor import LJSpeechProcessor\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n\ndef logmelfilterbank(audio,\n sampling_rate,\n fft_size=1024,\n hop_size=256,\n win_length=None,\n window=\"hann\",\n num_mels=80,\n fmin=None,\n fmax=None,\n eps=1e-10,\n ):\n \"\"\"Compute log-Mel filterbank feature.\n Args:\n audio (ndarray): Audio signal (T,).\n sampling_rate (int): Sampling rate.\n fft_size (int): FFT size.\n hop_size (int): Hop size.\n win_length (int): Window length. If set to None, it will be the same as fft_size.\n window (str): Window function type.\n num_mels (int): Number of mel basis.\n fmin (int): Minimum frequency in mel basis calculation.\n fmax (int): Maximum frequency in mel basis calculation.\n eps (float): Epsilon value to avoid inf in log calculation.\n Returns:\n ndarray: Log Mel filterbank feature (#frames, num_mels).\n \"\"\"\n # get amplitude spectrogram\n x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,\n win_length=win_length, window=window, pad_mode=\"reflect\")\n spc = np.abs(x_stft).T # (#frames, #bins)\n\n # get mel basis\n fmin = 0 if fmin is None else fmin\n fmax = sampling_rate / 2 if fmax is None else fmax\n mel_basis = librosa.filters.mel(sampling_rate, fft_size, num_mels, fmin, fmax)\n\n return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T))), x_stft\n\n\ndef main():\n \"\"\"Run preprocessing process.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Preprocess audio and then extract features (See detail in tensorflow_tts/bin/preprocess.py).\")\n parser.add_argument(\"--rootdir\", default=None, type=str, required=True,\n help=\"root path.\")\n parser.add_argument(\"--outdir\", default=None, type=str, required=True,\n help=\"output dir.\")\n parser.add_argument(\"--config\", type=str, required=True,\n help=\"yaml format configuration file.\")\n parser.add_argument(\"--n_cpus\", type=int, default=4, required=False,\n help=\"yaml format configuration file.\")\n parser.add_argument(\"--test_size\", type=float, default=0.05, required=False,\n help=\"yaml format configuration file.\")\n parser.add_argument(\"--verbose\", type=int, default=1,\n help=\"logging level. higher is more logging. 
(default=1)\")\n args = parser.parse_args()\n\n # set logger\n if args.verbose > 1:\n logging.basicConfig(\n level=logging.DEBUG, format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\")\n elif args.verbose > 0:\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\")\n else:\n logging.basicConfig(\n level=logging.WARN, format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\")\n logging.warning('Skip DEBUG/INFO messages')\n\n # load config\n with open(args.config) as f:\n config = yaml.load(f, Loader=yaml.Loader)\n config.update(vars(args))\n\n processor = LJSpeechProcessor(\n root_path=args.rootdir,\n cleaner_names=\"english_cleaners\"\n )\n\n # check directly existence\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir, exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid', 'raw-feats'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid', 'wavs'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid', 'ids'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid', 'raw-f0'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'valid', 'raw-energies'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train', 'raw-feats'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train', 'wavs'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train', 'ids'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train', 'raw-f0'), exist_ok=True)\n os.makedirs(os.path.join(args.outdir, 'train', 'raw-energies'), exist_ok=True)\n\n # train test split\n idx_train, idx_valid = train_test_split(\n range(len(processor.items)), shuffle=True, test_size=args.test_size, random_state=42)\n\n # train/valid utt_ids\n train_utt_ids = []\n valid_utt_ids = []\n\n for idx in range(len(processor.items)):\n utt_ids = processor.get_one_sample(idx)[\"utt_id\"]\n if idx in idx_train:\n train_utt_ids.append(utt_ids)\n elif idx in idx_valid:\n valid_utt_ids.append(utt_ids)\n\n # save train and valid utt_ids to track later.\n np.save(os.path.join(args.outdir, \"train_utt_ids.npy\"), train_utt_ids)\n np.save(os.path.join(args.outdir, \"valid_utt_ids.npy\"), valid_utt_ids)\n\n # process each data\n def save_to_file(idx):\n sample = processor.get_one_sample(idx)\n\n # get info from sample.\n audio = sample[\"audio\"]\n text_ids = sample[\"text_ids\"]\n utt_id = sample[\"utt_id\"]\n rate = sample[\"rate\"]\n\n # check\n assert len(audio.shape) == 1, \\\n f\"{utt_id} seems to be multi-channel signal.\"\n assert np.abs(audio).max() <= 1.0, \\\n f\"{utt_id} seems to be different from 16 bit PCM.\"\n assert rate == config[\"sampling_rate\"], \\\n f\"{utt_id} seems to have a different sampling rate.\"\n\n # trim silence\n if config[\"trim_silence\"]:\n audio, _ = librosa.effects.trim(audio,\n top_db=config[\"trim_threshold_in_db\"],\n frame_length=config[\"trim_frame_size\"],\n hop_length=config[\"trim_hop_size\"])\n\n if \"sampling_rate_for_feats\" not in config:\n x = audio\n sampling_rate = config[\"sampling_rate\"]\n hop_size = config[\"hop_size\"]\n else:\n x = librosa.resample(audio, rate, config[\"sampling_rate_for_feats\"])\n sampling_rate = config[\"sampling_rate_for_feats\"]\n assert config[\"hop_size\"] * config[\"sampling_rate_for_feats\"] % rate == 0, \\\n \"hop_size must be int value. 
please check sampling_rate_for_feats is correct.\"\n hop_size = config[\"hop_size\"] * config[\"sampling_rate_for_feats\"] // rate\n\n # extract feature\n mel, x_stft = logmelfilterbank(x,\n sampling_rate=sampling_rate,\n hop_size=hop_size,\n fft_size=config[\"fft_size\"],\n win_length=config[\"win_length\"],\n window=config[\"window\"],\n num_mels=config[\"num_mels\"],\n fmin=config[\"fmin\"],\n fmax=config[\"fmax\"])\n\n # make sure the audio length and feature length\n audio = np.pad(audio, (0, config[\"fft_size\"]), mode='edge')\n audio = audio[:len(mel) * config[\"hop_size\"]]\n\n # extract raw pitch\n f0, _ = pw.dio(x.astype(np.double),\n fs=config[\"sampling_rate\"],\n f0_ceil=config[\"fmax\"],\n frame_period=1000 * config[\"hop_size\"] / config[\"sampling_rate\"])\n\n if len(f0) >= len(mel):\n f0 = f0[:len(mel)]\n else:\n f0 = np.pad(f0, ((0, len(mel) - len(f0))))\n\n # extract energy\n S = librosa.magphase(x_stft)[0]\n energy = np.sqrt(np.sum(S ** 2, axis=0))\n\n assert len(mel) * config[\"hop_size\"] == len(audio)\n assert len(mel) == len(f0) == len(energy)\n\n # apply global gain\n if config[\"global_gain_scale\"] > 0.0:\n audio *= config[\"global_gain_scale\"]\n if np.abs(audio).max() >= 1.0:\n logging.warn(f\"{utt_id} causes clipping. \"\n f\"it is better to re-consider global gain scale.\")\n\n # save\n if config[\"format\"] == \"npy\":\n if idx in idx_train:\n subdir = 'train'\n elif idx in idx_valid:\n subdir = 'valid'\n\n np.save(os.path.join(args.outdir, subdir, \"wavs\", f\"{utt_id}-wave.npy\"),\n audio.astype(np.float32), allow_pickle=False)\n np.save(os.path.join(args.outdir, subdir, \"raw-feats\", f\"{utt_id}-raw-feats.npy\"),\n mel.astype(np.float32), allow_pickle=False)\n np.save(os.path.join(args.outdir, subdir, \"ids\", f\"{utt_id}-ids.npy\"),\n text_ids.astype(np.int32), allow_pickle=False)\n np.save(os.path.join(args.outdir, subdir, \"raw-f0\", f\"{utt_id}-raw-f0.npy\"),\n f0.astype(np.float32), allow_pickle=False)\n np.save(os.path.join(args.outdir, subdir, \"raw-energies\", f\"{utt_id}-raw-energy.npy\"),\n energy.astype(np.float32), allow_pickle=False)\n else:\n raise ValueError(\"support only npy format.\")\n\n # apply multi-processing Pool\n p = Pool(nodes=args.n_cpus)\n work = tqdm(range(len(processor.items)), desc=\"[Preprocessing]\")\n list(p.imap(save_to_file, work))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.dot", "numpy.sum", "numpy.abs", "numpy.pad" ] ]
yaroslavsobolev/sonnenhase
[ "c0b3ed5cbea32944162e748c42c24359a6c58f9a" ]
[ "reflections_3d.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nimport json\nimport pytz\nfrom datetime import datetime\n\ndef has_refl(target_filename):\n image = ndimage.imread(target_filename, mode='RGB')\n image[image < 50] = 0\n plt.imshow(image)\n maxes = np.amax(image, axis=(0,1))\n means = np.mean(image, axis=(0,1))\n if np.any(maxes > 220):\n if means[2] > means[0] + means[1]:\n return True\n else:\n return False\n else:\n return False\n\n# target_filename = 'data/3d/by_month/03/direct_vB__Camera0080029.jpg'\n# target_filename = 'data/3d/by_month/03/direct_vB__Camera0050010.jpg'\n# target_filename = 'data/3d/by_month/03/direct_vB__Camera0040040.jpg'\n\n\n# fig = plt.figure(1)\n# months = 2*np.arange(5)\n# data_by_cams = []\n# for camera_id in 1+np.arange(18):\n# data_for_cam = np.zeros(shape=(12, 40), dtype=float)\n# for month in months:\n# for frame in range(40):\n# target_filename = 'data/3d/by_month/{0:02d}/direct_vB__Camera{1:03d}{2:04d}.jpg'.format(month+1, camera_id, frame)\n# if has_refl(target_filename):\n# data_for_cam[month, frame] = 1\n# for month in [1,3,5,7,9]:\n# data_for_cam[month] = (data_for_cam[month-1] + data_for_cam[month+1])/2\n# data_for_cam[11] = (data_for_cam[10] + data_for_cam[0]) / 2\n# plt.imshow(data_for_cam)\n# fig.savefig('figures/by_cam/{0:03d}.png'.format(camera_id))\n# plt.cla()\n# data_by_cams.append(np.copy(data_for_cam))\n# np.save('data_by_cams_filtered', data_by_cams)\n#\n# # print(has_refl(target_filename))\n# plt.show()\n\n\n# # ================ Parsing weather data\n# with open('data/weather/moscow_weather_history.json') as json_file:\n# wdata = json.load(json_file)\n#\n# by_mh = list()\n# for i in range(12):\n# temp_ = []\n# for j in range(24):\n# temp_.append(list())\n# by_mh.append(temp_.copy())\n# for r in wdata:\n# dt = r['dt']\n# clouds = r['clouds']['all']\n# loctime = datetime.fromtimestamp(dt, tz=pytz.timezone('America/New_York'))\n# month = int(loctime.strftime('%m'))\n# hour = int(loctime.strftime('%H'))\n# by_mh[month-1][hour].append(clouds)\n# for i in range(12):\n# for j in range(24):\n# temp_ = np.array(by_mh[i][j])\n# by_mh[i][j] = np.mean(temp_)\n# # by_mh_2 = np.mean(by_mh, axis=2)\n# by_mh = np.array(by_mh)\n# np.save('by_mh', by_mh)\n# print(1)\n\n\n## ====================== main plotting\n\nby_mh = np.load('by_mh.npy')\n\ndata = np.load('data_by_cams_filtered.npy')\ndata[10, 6, 17:22] = 0\ndata_with_clouds = np.copy(data)\nfor i in range(18):\n for frame in range(40):\n hour_here = int(np.floor(3 + frame/2))\n data_with_clouds[i, :, frame] = data_with_clouds[i, :, frame]*(100-by_mh[:, hour_here])/100\n\nfig1 = plt.figure(1) # , figsize=(3,3)\navg_by_road = np.max(data[:11, :, :], axis=0)\nplt.imshow(avg_by_road*100, interpolation='bicubic', cmap='inferno', aspect = 1.5)\nplt.yticks(range(12), ['Янв', 'Фев', 'Мар', 'Апр', 'Май', 'Июн', 'Июл', 'Авг', 'Сен', 'Окт', 'Ноя', 'Дек'])\nplt.xticks(range(0, 40, 2), range(3, 24, 1))\nplt.xlabel('Время суток, часы')\nplt.ylabel('Время года, месяц')\n# plt.colorbar()\nfig1.savefig('figures/noclouds_nocolorbar.png', dpi=600)\n# plt.show()\n\nfig2 = plt.figure(2)\nplt.imshow(by_mh, interpolation='bicubic', cmap='inferno', aspect = 0.9, vmin=0, vmax=100)\nplt.yticks(range(12), ['Янв', 'Фев', 'Мар', 'Апр', 'Май', 'Июн', 'Июл', 'Авг', 'Сен', 'Окт', 'Ноя', 'Дек'])\n# plt.xticks(range(0, 40, 2), range(3, 24, 1))\nplt.xlabel('Время суток, часы')\nplt.ylabel('Время года, месяц')\n# plt.colorbar()\nfig2.savefig('figures/clouds_cover.png', dpi=600)\n\nfig3 = 
plt.figure(3)\navg_by_road = np.max(data_with_clouds[:11, :, :], axis=0)\nplt.imshow(avg_by_road*100, interpolation='bicubic', cmap='inferno', aspect = 1.5, vmax=100)\nplt.yticks(range(12), ['Янв', 'Фев', 'Мар', 'Апр', 'Май', 'Июн', 'Июл', 'Авг', 'Сен', 'Окт', 'Ноя', 'Дек'])\nplt.xticks(range(0, 40, 2), range(3, 24, 1))\nplt.xlabel('Время суток, часы')\nplt.ylabel('Время года, месяц')\n# plt.colorbar()\nfig3.savefig('figures/with_clouds.png', dpi=600)\n\n\nprint(np.sum(avg_by_road*0.5*30))\nplt.show()\nprint(1)\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.amax", "scipy.ndimage.imread", "numpy.max", "numpy.copy", "matplotlib.pyplot.ylabel", "numpy.mean", "matplotlib.pyplot.xlabel", "numpy.any", "numpy.floor", "numpy.load", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.figure" ] ]
liuzenan1217/High-Frequency-Predictor
[ "267e5abd4ca29c22344d145eb6d32c5442b37708" ]
[ "Model/model_regression.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 14 09:58:35 2022\r\n\r\n@author: liuzenan\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.utils import data\r\nfrom torchinfo import summary\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\n\r\n\r\nclass LSTM_model(nn.Module):\r\n def __init__(self,num_layers, input_size, hidden_size):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.lstm = nn.LSTM(num_layers=num_layers,\r\n input_size = self.input_size,\r\n hidden_size = self.hidden_size)\r\n self.fc = nn.Linear(hidden_size, 1)\r\n\r\n \r\n def forward(self, our_data):\r\n \"\"\"\r\n our_data: [batch_size, sequence_len, dims]:[256,100,40]\r\n \"\"\"\r\n lstm_output, _ = self.lstm(our_data, None)\r\n output = self.fc(lstm_output)\r\n return output\r\n\r\n\r\n\r\nclass LSTM_stacked_model(nn.Module):\r\n def __init__(self,num_layers_s, input_size_s, hidden_size_s_1,hidden_size_s_2,hidden_size_s_3):\r\n super().__init__() \r\n self.lstm1 = nn.LSTM(num_layers=num_layers_s,\r\n input_size = input_size_s,\r\n hidden_size = hidden_size_s_1)\r\n self.lstm2 = nn.LSTM(num_layers=num_layers_s,\r\n input_size = hidden_size_s_1,\r\n hidden_size = hidden_size_s_2)\r\n self.lstm3 = nn.LSTM(num_layers=num_layers_s,\r\n input_size = hidden_size_s_2,\r\n hidden_size = hidden_size_s_3)\r\n self.fc = nn.Linear(hidden_size_s_3,1)\r\n \r\n def forward(self, our_data):\r\n \"\"\"\r\n our_data: [batch_size, sequence_len, dims]:[256,100,40]\r\n \"\"\"\r\n lstm_output, _ = self.lstm1(our_data, None)\r\n lstm_output, _ = self.lstm2(lstm_output, None)\r\n lstm_output, _ = self.lstm3(lstm_output, None)\r\n output = self.fc(lstm_output)\r\n return output\r\n\r\n\r\n\r\n \r\nclass MLP_model(nn.Module):\r\n def __init__(self,inputsize,layer1,layer2,layer3):\r\n super().__init__()\r\n self.fc1 = nn.Linear(inputsize,layer1)\r\n self.fc2 = nn.Linear(layer1,layer2)\r\n self.fc3 = nn.Linear(layer2,layer3)\r\n self.fc4 = nn.Linear(layer3,1)\r\n \r\n def forward(self,our_data):\r\n \"\"\"\r\n our_data: [batch_size,1,4000]:[256,1,4000]\r\n \"\"\"\r\n mlp_output = nn.functional.relu(self.fc1(our_data))\r\n mlp_output = nn.functional.relu(self.fc2(mlp_output))\r\n mlp_output = nn.functional.relu(self.fc3(mlp_output))\r\n forecast_y = (self.fc4(mlp_output))\r\n return forecast_y\r\n \r\n\r\n\r\n \r\nclass LSTM_MLP_model(nn.Module):\r\n def __init__(self,num_layers, input_size, hidden_size, fc_size):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.lstm = nn.LSTM(num_layers=num_layers,\r\n input_size = self.input_size,\r\n hidden_size = self.hidden_size)\r\n self.fc1 = nn.Linear(hidden_size, fc_size)\r\n self.fc2 = nn.Linear(fc_size, 1)\r\n \r\n def forward(self, our_data):\r\n \"\"\"\r\n our_data: [batch_size, sequence_len, dims]:[256,100,40]\r\n \"\"\"\r\n lstm_output, _ = self.lstm(our_data, None)\r\n output = nn.functional.relu(self.fc1(lstm_output))\r\n output = self.fc2(output)\r\n return output\r\n \r\n \r\n\r\nclass CNN_LSTM_model(nn.Module):\r\n def __init__(self,conv_filter_num,inception_num,leaky_relu_alpha,LSTM_num,device):\r\n super().__init__()\r\n self.conv_filter_num = conv_filter_num\r\n self.inception_num = inception_num\r\n self.leaky_relu_alpha = leaky_relu_alpha\r\n self.LSTM_num = LSTM_num\r\n self.device = device\r\n \r\n # convolution blocks\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=conv_filter_num, kernel_size=(1,2), 
stride=(1,2)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(1,2), stride=(1,2)),\r\n nn.Tanh(),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.Tanh(),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.Tanh(),\r\n nn.BatchNorm2d(conv_filter_num),\r\n )\r\n self.conv3 = nn.Sequential(\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(1,10)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=conv_filter_num, kernel_size=(4,1)),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(conv_filter_num),\r\n )\r\n \r\n # inception moduels\r\n self.inp1 = nn.Sequential(\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=inception_num, kernel_size=(1,1), padding='same'),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(inception_num),\r\n nn.Conv2d(in_channels=inception_num, out_channels=inception_num, kernel_size=(3,1), padding='same'),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(inception_num),\r\n )\r\n self.inp2 = nn.Sequential(\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=inception_num, kernel_size=(1,1), padding='same'),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(inception_num),\r\n nn.Conv2d(in_channels=inception_num, out_channels=inception_num, kernel_size=(5,1), padding='same'),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(inception_num),\r\n )\r\n self.inp3 = nn.Sequential(\r\n nn.MaxPool2d((3, 1), stride=(1, 1), padding=(1, 0)),\r\n nn.Conv2d(in_channels=conv_filter_num, out_channels=inception_num, kernel_size=(1,1), padding='same'),\r\n nn.LeakyReLU(negative_slope=leaky_relu_alpha),\r\n nn.BatchNorm2d(inception_num),\r\n )\r\n \r\n # lstm layers\r\n self.lstm = nn.LSTM(input_size=3 * inception_num, hidden_size=LSTM_num, num_layers=1, batch_first=True)\r\n self.fc1 = nn.Linear(LSTM_num, 1)\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n our_data: [batch_size,1,100,40]:[256,1,100,40]\r\n \"\"\"\r\n # h0: (number of hidden layers, batch size, hidden size)\r\n h0 = torch.zeros(1, x.size(0), self.LSTM_num).to(self.device)\r\n c0 = torch.zeros(1, x.size(0), self.LSTM_num).to(self.device)\r\n \r\n x = self.conv1(x)\r\n x = self.conv2(x)\r\n x = self.conv3(x)\r\n \r\n x_inp1 = self.inp1(x)\r\n x_inp2 = self.inp2(x)\r\n x_inp3 = self.inp3(x) \r\n \r\n x = torch.cat((x_inp1, x_inp2, x_inp3), dim=1)\r\n x = x.permute(0, 2, 1, 3)\r\n x = torch.reshape(x, (-1, x.shape[1], x.shape[2]))\r\n \r\n x, _ = self.lstm(x, (h0, c0))\r\n x = x[:, -1, :]\r\n x = self.fc1(x)\r\n #forecast_y 
= torch.softmax(x, dim=1)\r\n forecast_y = x\r\n \r\n return forecast_y" ]
[ [ "torch.cat", "torch.nn.LSTM", "torch.reshape", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d" ] ]
JRFeldman/hymo
[ "d57c3ada8888b1dfa8060a23b361a0dfe43fa30e" ]
[ "hymo/tests/test_swmmreport.py" ]
[ "import os\nimport pytest\nfrom pkg_resources import resource_filename\n\nimport pytest\nimport pandas as pd\nimport pandas.util.testing as pdtest\n\nfrom hymo import SWMMReportFile\nfrom .utils import data_path\n\nclass base_ReportFileMixin(object):\n def teardown(self):\n None\n\n def test_attributes(self):\n assert hasattr(self.rpt, 'path')\n assert isinstance(self.rpt.path, str)\n\n assert hasattr(self.rpt, 'orig_file')\n assert isinstance(self.rpt.orig_file, list)\n\n assert hasattr(self.rpt, 'subcatchment_runoff_results')\n assert isinstance(self.rpt.subcatchment_runoff_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'node_depth_results')\n assert isinstance(self.rpt.node_depth_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'node_inflow_results')\n assert isinstance(self.rpt.node_inflow_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'node_surcharge_results')\n assert isinstance(self.rpt.node_surcharge_results, pd.DataFrame)\n \n assert hasattr(self.rpt, 'node_flooding_results')\n assert isinstance(self.rpt.node_flooding_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'storage_volume_results')\n assert isinstance(self.rpt.storage_volume_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'outfall_loading_results')\n assert isinstance(self.rpt.outfall_loading_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'link_flow_results')\n assert isinstance(self.rpt.link_flow_results, pd.DataFrame)\n\n assert hasattr(self.rpt, 'flow_classification_results')\n assert isinstance(self.rpt.flow_classification_results, pd.DataFrame)\n \n assert hasattr(self.rpt, 'conduit_surcharge_results')\n assert isinstance(self.rpt.conduit_surcharge_results, pd.DataFrame)\n\n\nclass Test_ReportFile(base_ReportFileMixin):\n def setup(self):\n # TODO\n # subcatchment results\n # pollutants\n\n self.known_path = data_path(os.path.join('swmm', 'test_rpt.rpt'))\n self.node_surcharge_file = data_path(os.path.join('swmm', 'test_node_surcharge_data.csv'))\n self.node_depth_file = data_path(os.path.join('swmm', 'test_node_depth_data.csv'))\n self.node_inflow_file = data_path(os.path.join('swmm', 'test_node_inflow_data.csv'))\n self.node_flooding_file = data_path(os.path.join('swmm', 'test_node_flooding_data.csv'))\n self.storage_volume_file = data_path(os.path.join('swmm', 'test_storage_volume_data.csv'))\n self.outfall_loading_file = data_path(os.path.join('swmm', 'test_outfall_loading_data.csv'))\n self.link_flow_file = data_path(os.path.join('swmm', 'test_link_flow_data.csv'))\n self.flow_classification_file = data_path(os.path.join('swmm', 'test_flow_classification_data.csv'))\n self.conduit_surcharge_file = data_path(os.path.join('swmm', 'test_conduit_surcharge_data.csv'))\n\n self.rpt = SWMMReportFile(self.known_path)\n\n self.known_node_surcharge_results = pd.read_csv(\n self.node_surcharge_file, index_col=[0])\n self.known_node_depth_results = pd.read_csv(\n self.node_depth_file, index_col=[0])\n self.known_node_inflow_results = pd.read_csv(\n self.node_inflow_file, index_col=[0])\n self.known_node_flooding_results = pd.read_csv(\n self.node_flooding_file, index_col=[0])\n self.known_storage_volume_results = pd.read_csv(\n self.storage_volume_file, index_col=[0])\n self.known_outfall_loading_results = pd.read_csv(\n self.outfall_loading_file, index_col=[0])\n self.known_link_flow_results = pd.read_csv(\n self.link_flow_file, index_col=[0])\n self.known_flow_classification_results = pd.read_csv(\n self.flow_classification_file, index_col=[0])\n self.known_conduit_surcharge_results = 
pd.read_csv(\n self.conduit_surcharge_file, index_col=[0])\n\n\n def test_node_depth_results(self):\n pdtest.assert_frame_equal(\n self.rpt.node_depth_results,\n self.known_node_depth_results\n )\n\n def test_node_surcharge_results(self):\n pdtest.assert_frame_equal(\n self.rpt.node_surcharge_results,\n self.known_node_surcharge_results\n )\n\n def test_node_inflow_results(self):\n pdtest.assert_frame_equal(\n self.rpt.node_inflow_results,\n self.known_node_inflow_results\n )\n \n def test_node_flooding_results(self):\n pdtest.assert_frame_equal(\n self.rpt.node_flooding_results,\n self.known_node_flooding_results\n )\n\n def test_storage_volume_results(self):\n pdtest.assert_frame_equal(\n self.rpt.storage_volume_results,\n self.known_storage_volume_results\n )\n def test_outfall_loading_results(self):\n pdtest.assert_frame_equal(\n self.rpt.outfall_loading_results,\n self.known_outfall_loading_results\n )\n def test_link_flow_results(self):\n pdtest.assert_frame_equal(\n self.rpt.link_flow_results,\n self.known_link_flow_results\n )\n def test_flow_classification_results(self):\n pdtest.assert_frame_equal(\n self.rpt.flow_classification_results,\n self.known_flow_classification_results\n )\n def test_conduit_surcharge_results(self):\n pdtest.assert_frame_equal(\n self.rpt.conduit_surcharge_results,\n self.known_conduit_surcharge_results\n )" ]
[ [ "pandas.util.testing.assert_frame_equal", "pandas.read_csv" ] ]
davidvhill/ccd
[ "2eaf9933d76b861fc243327b323b80fbcf301932" ]
[ "test/test_ccd_detect.py" ]
[ "\"\"\"\nTests for running ccd from the top level __init__.py/detect()\n\nSanity checks to make sure test data sets run to completion\n\"\"\"\nimport numpy as np\n\nfrom test.shared import read_data\n# from shared import two_change_data\n#\nimport ccd\n\nparams = ccd.app.get_default_params()\n\n\ndef test_sample_data_sets():\n \"\"\"\n Sanity test to ensure all test data sets run to completion\n \"\"\"\n samples = ['test/resources/sample_1.csv',\n 'test/resources/sample_2.csv',\n 'test/resources/sample_WA_grid08_row9_col2267_persistent_snow.csv',\n 'test/resources/sample_WA_grid08_row12_col2265_fmask_fail.csv',\n 'test/resources/sample_WA_grid08_row999_col1_normal.csv',\n 'test/resources/test_3657_3610_observations.csv']\n\n params = {'QA_BITPACKED': False,\n 'QA_FILL': 255,\n 'QA_CLEAR': 0,\n 'QA_WATER': 1,\n 'QA_SHADOW': 2,\n 'QA_SNOW': 3,\n 'QA_CLOUD': 4}\n\n for sample in samples:\n data = read_data(sample)\n results = ccd.detect(data[0], data[1], data[2], data[3], data[4],\n data[5], data[6], data[7], data[8],\n params=params)\n\n\ndef test_npy():\n \"\"\"\n Sanity tests for npy test data sets\n \"\"\"\n samples = ['test/resources/h03v09_-2010765_1964625_pixel.npy'] # Main loop failure in LF\n\n for sample in samples:\n dat = np.load(sample)\n results = ccd.detect(**dat[1])\n\n\ndef test_insuff_clear():\n \"\"\"\n Flex the insufficient clear procedure code to make sure it runs.\n \"\"\"\n sample = 'test/resources/h04v03_-1947075_2846265_pixel_insuff.npy'\n\n result = ccd.detect(**np.load(sample)[1])\n\n assert result['change_models'][0]['curve_qa'] == params.CURVE_QA['INSUF_CLEAR']\n\n\ndef test_perm_snow():\n \"\"\"\n Flex the permanent snow procedure code.\n \"\"\"\n sample = 'test/resources/h04v03_-1947105_2846265_pixel_snow.npy'\n\n result = ccd.detect(**np.load(sample)[1])\n\n assert result['change_models'][0]['curve_qa'] == params.CURVE_QA['PERSIST_SNOW']\n\n\ndef test_startfit():\n \"\"\"\n Flex the code the generates start fits.\n \"\"\"\n sample = 'test/resources/h04v03_-1945155_2844645_pixel_startfit.npy'\n\n result = ccd.detect(**np.load(sample)[1])\n\n assert result['change_models'][0]['curve_qa'] == params.CURVE_QA['START']\n\n\ndef test_endfit():\n \"\"\"\n Flex the code the generates start fits.\n \"\"\"\n sample = 'test/resources/h04v03_-1945125_2844645_pixel_endfit.npy'\n\n result = ccd.detect(**np.load(sample)[1])\n\n assert result['change_models'][-1]['curve_qa'] == params.CURVE_QA['END']\n\n\ndef test_sort_dates():\n arr = [1, 3, 2, 5, 2]\n ans = np.array([0, 2, 4, 1, 3])\n\n assert np.array_equal(ans, ccd.__sort_dates(arr))\n" ]
[ [ "numpy.load", "numpy.array" ] ]
vsokolov00/speechbrain
[ "ff3bca4c05cbf01aff50139b0d61b5687482ad45" ]
[ "speechbrain/nnet/losses.py" ]
[ "\"\"\"\nLosses for training neural networks.\n\nAuthors\n * Mirco Ravanelli 2020\n * Samuele Cornell 2020\n * Hwidong Na 2020\n * Yan Gao 2020\n * Titouan Parcollet 2020\n\"\"\"\n\nimport math\nimport torch\nimport logging\nimport functools\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom itertools import permutations\nfrom speechbrain.dataio.dataio import length_to_mask\nfrom speechbrain.decoders.ctc import filter_ctc_output\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef transducer_loss(\n log_probs,\n targets,\n input_lens,\n target_lens,\n blank_index,\n reduction=\"mean\",\n use_torchaudio=True,\n):\n \"\"\"Transducer loss, see `speechbrain/nnet/loss/transducer_loss.py`.\n\n Arguments\n ---------\n predictions : torch.Tensor\n Predicted tensor, of shape [batch, maxT, maxU, num_labels].\n targets : torch.Tensor\n Target tensor, without any blanks, of shape [batch, target_len].\n input_lens : torch.Tensor\n Length of each utterance.\n target_lens : torch.Tensor\n Length of each target sequence.\n blank_index : int\n The location of the blank symbol among the label indices.\n reduction : str\n Specifies the reduction to apply to the output: 'mean' | 'batchmean' | 'sum'.\n use_torchaudio: bool\n If True, use Transducer loss implementation from torchaudio, otherwise,\n use Speechbrain Numba implementation.\n \"\"\"\n input_lens = (input_lens * log_probs.shape[1]).round().int()\n target_lens = (target_lens * targets.shape[1]).round().int()\n\n if use_torchaudio:\n try:\n from torchaudio.functional import rnnt_loss\n except ImportError:\n err_msg = \"The dependency torchaudio >= 0.10.0 is needed to use Transducer Loss\\n\"\n err_msg += \"Cannot import torchaudio.functional.rnnt_loss.\\n\"\n err_msg += \"To use it, please install torchaudio >= 0.10.0\\n\"\n err_msg += \"==================\\n\"\n err_msg += \"Otherwise, you can use our numba implementation, set `use_torchaudio=False`.\\n\"\n raise ImportError(err_msg)\n\n return rnnt_loss(\n log_probs,\n targets.int(),\n input_lens,\n target_lens,\n blank=blank_index,\n reduction=reduction,\n )\n else:\n from speechbrain.nnet.loss.transducer_loss import Transducer\n\n return Transducer.apply(\n log_probs, targets, input_lens, target_lens, blank_index, reduction,\n )\n\n\nclass PitWrapper(nn.Module):\n \"\"\"\n Permutation Invariant Wrapper to allow Permutation Invariant Training\n (PIT) with existing losses.\n\n Permutation invariance is calculated over the sources/classes axis which is\n assumed to be the rightmost dimension: predictions and targets tensors are\n assumed to have shape [batch, ..., channels, sources].\n\n Arguments\n ---------\n base_loss : function\n Base loss function, e.g. torch.nn.MSELoss. 
It is assumed that it takes\n two arguments:\n predictions and targets and no reduction is performed.\n (if a pytorch loss is used, the user must specify reduction=\"none\").\n\n Returns\n ---------\n pit_loss : torch.nn.Module\n Torch module supporting forward method for PIT.\n\n Example\n -------\n >>> pit_mse = PitWrapper(nn.MSELoss(reduction=\"none\"))\n >>> targets = torch.rand((2, 32, 4))\n >>> p = (3, 0, 2, 1)\n >>> predictions = targets[..., p]\n >>> loss, opt_p = pit_mse(predictions, targets)\n >>> loss\n tensor([0., 0.])\n \"\"\"\n\n def __init__(self, base_loss):\n super(PitWrapper, self).__init__()\n self.base_loss = base_loss\n\n def _fast_pit(self, loss_mat):\n \"\"\"\n Arguments\n ----------\n loss_mat : torch.Tensor\n Tensor of shape [sources, source] containing loss values for each\n possible permutation of predictions.\n\n Returns\n -------\n loss : torch.Tensor\n Permutation invariant loss for the current batch, tensor of shape [1]\n\n assigned_perm : tuple\n Indexes for optimal permutation of the input over sources which\n minimizes the loss.\n \"\"\"\n\n loss = None\n assigned_perm = None\n for p in permutations(range(loss_mat.shape[0])):\n c_loss = loss_mat[range(loss_mat.shape[0]), p].mean()\n if loss is None or loss > c_loss:\n loss = c_loss\n assigned_perm = p\n return loss, assigned_perm\n\n def _opt_perm_loss(self, pred, target):\n \"\"\"\n Arguments\n ---------\n pred : torch.Tensor\n Network prediction for the current example, tensor of\n shape [..., sources].\n target : torch.Tensor\n Target for the current example, tensor of shape [..., sources].\n\n Returns\n -------\n loss : torch.Tensor\n Permutation invariant loss for the current example, tensor of shape [1]\n\n assigned_perm : tuple\n Indexes for optimal permutation of the input over sources which\n minimizes the loss.\n\n \"\"\"\n\n n_sources = pred.size(-1)\n\n pred = pred.unsqueeze(-2).repeat(\n *[1 for x in range(len(pred.shape) - 1)], n_sources, 1\n )\n target = target.unsqueeze(-1).repeat(\n 1, *[1 for x in range(len(target.shape) - 1)], n_sources\n )\n\n loss_mat = self.base_loss(pred, target)\n assert (\n len(loss_mat.shape) >= 2\n ), \"Base loss should not perform any reduction operation\"\n mean_over = [x for x in range(len(loss_mat.shape))]\n loss_mat = loss_mat.mean(dim=mean_over[:-2])\n\n return self._fast_pit(loss_mat)\n\n def reorder_tensor(self, tensor, p):\n \"\"\"\n Arguments\n ---------\n tensor : torch.Tensor\n Tensor to reorder given the optimal permutation, of shape\n [batch, ..., sources].\n p : list of tuples\n List of optimal permutations, e.g. 
for batch=2 and n_sources=3\n            [(0, 1, 2), (0, 2, 1)].\n\n        Returns\n        -------\n        reordered : torch.Tensor\n            Reordered tensor given permutation p.\n        \"\"\"\n\n        reordered = torch.zeros_like(tensor, device=tensor.device)\n        for b in range(tensor.shape[0]):\n            reordered[b] = tensor[b][..., p[b]].clone()\n        return reordered\n\n    def forward(self, preds, targets):\n        \"\"\"\n        Arguments\n        ---------\n        preds : torch.Tensor\n            Network predictions tensor, of shape\n            [batch, channels, ..., sources].\n        targets : torch.Tensor\n            Target tensor, of shape [batch, channels, ..., sources].\n\n        Returns\n        -------\n        loss : torch.Tensor\n            Permutation invariant loss for current examples, tensor of\n            shape [batch]\n\n        perms : list\n            List of indexes for optimal permutation of the inputs over\n            sources.\n            e.g., [(0, 1, 2), (2, 1, 0)] for three sources and 2 examples\n            per batch.\n        \"\"\"\n        losses = []\n        perms = []\n        for pred, label in zip(preds, targets):\n            loss, p = self._opt_perm_loss(pred, label)\n            perms.append(p)\n            losses.append(loss)\n        loss = torch.stack(losses)\n        return loss, perms\n\n\ndef ctc_loss(\n    log_probs, targets, input_lens, target_lens, blank_index, reduction=\"mean\"\n):\n    \"\"\"CTC loss.\n\n    Arguments\n    ---------\n    predictions : torch.Tensor\n        Predicted tensor, of shape [batch, time, chars].\n    targets : torch.Tensor\n        Target tensor, without any blanks, of shape [batch, target_len]\n    input_lens : torch.Tensor\n        Length of each utterance.\n    target_lens : torch.Tensor\n        Length of each target sequence.\n    blank_index : int\n        The location of the blank symbol among the character indexes.\n    reduction : str\n        What reduction to apply to the output. 'mean', 'sum', 'batch',\n        'batchmean', 'none'.\n        See pytorch for 'mean', 'sum', 'none'. The 'batch' option returns\n        one loss per item in the batch, 'batchmean' returns sum / batch size.\n    \"\"\"\n    input_lens = (input_lens * log_probs.shape[1]).round().int()\n    target_lens = (target_lens * targets.shape[1]).round().int()\n    log_probs = log_probs.transpose(0, 1)\n\n    if reduction == \"batchmean\":\n        reduction_loss = \"sum\"\n    elif reduction == \"batch\":\n        reduction_loss = \"none\"\n    else:\n        reduction_loss = reduction\n    loss = torch.nn.functional.ctc_loss(\n        log_probs,\n        targets,\n        input_lens,\n        target_lens,\n        blank_index,\n        zero_infinity=True,\n        reduction=reduction_loss,\n    )\n\n    if reduction == \"batchmean\":\n        return loss / targets.shape[0]\n    elif reduction == \"batch\":\n        N = loss.size(0)\n        return loss.view(N, -1).sum(1) / target_lens.view(N, -1).sum(1)\n    else:\n        return loss\n\n\ndef l1_loss(\n    predictions, targets, length=None, allowed_len_diff=3, reduction=\"mean\"\n):\n    \"\"\"Compute the true l1 loss, accounting for length differences.\n\n    Arguments\n    ---------\n    predictions : torch.Tensor\n        Predicted tensor, of shape ``[batch, time, *]``.\n    targets : torch.Tensor\n        Target tensor with the same size as predicted tensor.\n    length : torch.Tensor\n        Length of each utterance for computing true error with a mask.\n    allowed_len_diff : int\n        Length difference that will be tolerated before raising an exception.\n    reduction : str\n        Options are 'mean', 'batch', 'batchmean', 'sum'.\n        See pytorch for 'mean', 'sum'. 
The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> probs = torch.tensor([[0.9, 0.1, 0.1, 0.9]])\n >>> l1_loss(probs, torch.tensor([[1., 0., 0., 1.]]))\n tensor(0.1000)\n \"\"\"\n predictions, targets = truncate(predictions, targets, allowed_len_diff)\n loss = functools.partial(torch.nn.functional.l1_loss, reduction=\"none\")\n return compute_masked_loss(\n loss, predictions, targets, length, reduction=reduction\n )\n\n\ndef mse_loss(\n predictions, targets, length=None, allowed_len_diff=3, reduction=\"mean\"\n):\n \"\"\"Compute the true mean squared error, accounting for length differences.\n\n Arguments\n ---------\n predictions : torch.Tensor\n Predicted tensor, of shape ``[batch, time, *]``.\n targets : torch.Tensor\n Target tensor with the same size as predicted tensor.\n length : torch.Tensor\n Length of each utterance for computing true error with a mask.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n reduction : str\n Options are 'mean', 'batch', 'batchmean', 'sum'.\n See pytorch for 'mean', 'sum'. The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> probs = torch.tensor([[0.9, 0.1, 0.1, 0.9]])\n >>> mse_loss(probs, torch.tensor([[1., 0., 0., 1.]]))\n tensor(0.0100)\n \"\"\"\n predictions, targets = truncate(predictions, targets, allowed_len_diff)\n loss = functools.partial(torch.nn.functional.mse_loss, reduction=\"none\")\n return compute_masked_loss(\n loss, predictions, targets, length, reduction=reduction\n )\n\n\ndef classification_error(\n probabilities, targets, length=None, allowed_len_diff=3, reduction=\"mean\"\n):\n \"\"\"Computes the classification error at frame or batch level.\n\n Arguments\n ---------\n probabilities : torch.Tensor\n The posterior probabilities of shape\n [batch, prob] or [batch, frames, prob]\n targets : torch.Tensor\n The targets, of shape [batch] or [batch, frames]\n length : torch.Tensor\n Length of each utterance, if frame-level loss is desired.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n reduction : str\n Options are 'mean', 'batch', 'batchmean', 'sum'.\n See pytorch for 'mean', 'sum'. 
The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> probs = torch.tensor([[[0.9, 0.1], [0.1, 0.9]]])\n >>> classification_error(probs, torch.tensor([1, 1]))\n tensor(0.5000)\n \"\"\"\n if len(probabilities.shape) == 3 and len(targets.shape) == 2:\n probabilities, targets = truncate(\n probabilities, targets, allowed_len_diff\n )\n\n def error(predictions, targets):\n predictions = torch.argmax(probabilities, dim=-1)\n return (predictions != targets).float()\n\n return compute_masked_loss(\n error, probabilities, targets.long(), length, reduction=reduction\n )\n\n\ndef nll_loss(\n log_probabilities,\n targets,\n length=None,\n label_smoothing=0.0,\n allowed_len_diff=3,\n reduction=\"mean\",\n):\n \"\"\"Computes negative log likelihood loss.\n\n Arguments\n ---------\n log_probabilities : torch.Tensor\n The probabilities after log has been applied.\n Format is [batch, log_p] or [batch, frames, log_p].\n targets : torch.Tensor\n The targets, of shape [batch] or [batch, frames].\n length : torch.Tensor\n Length of each utterance, if frame-level loss is desired.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n reduction : str\n Options are 'mean', 'batch', 'batchmean', 'sum'.\n See pytorch for 'mean', 'sum'. The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9]])\n >>> nll_loss(torch.log(probs), torch.tensor([1, 1]))\n tensor(1.2040)\n \"\"\"\n if len(log_probabilities.shape) == 3:\n log_probabilities, targets = truncate(\n log_probabilities, targets, allowed_len_diff\n )\n log_probabilities = log_probabilities.transpose(1, -1)\n\n # Pass the loss function but apply reduction=\"none\" first\n loss = functools.partial(torch.nn.functional.nll_loss, reduction=\"none\")\n return compute_masked_loss(\n loss,\n log_probabilities,\n targets.long(),\n length,\n label_smoothing=label_smoothing,\n reduction=reduction,\n )\n\n\ndef bce_loss(\n inputs,\n targets,\n length=None,\n weight=None,\n pos_weight=None,\n reduction=\"mean\",\n allowed_len_diff=3,\n label_smoothing=0.0,\n):\n \"\"\"Computes binary cross-entropy (BCE) loss. It also applies the sigmoid\n function directly (this improves the numerical stability).\n\n Arguments\n ---------\n inputs : torch.Tensor\n The output before applying the final softmax\n Format is [batch[, 1]?] or [batch, frames[, 1]?].\n (Works with or without a singleton dimension at the end).\n targets : torch.Tensor\n The targets, of shape [batch] or [batch, frames].\n length : torch.Tensor\n Length of each utterance, if frame-level loss is desired.\n weight : torch.Tensor\n A manual rescaling weight if provided it’s repeated to match input\n tensor shape.\n pos_weight : torch.Tensor\n A weight of positive examples. Must be a vector with length equal to\n the number of classes.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n reduction: str\n Options are 'mean', 'batch', 'batchmean', 'sum'.\n See pytorch for 'mean', 'sum'. 
The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> inputs = torch.tensor([10.0, -6.0])\n >>> targets = torch.tensor([1, 0])\n >>> bce_loss(inputs, targets)\n tensor(0.0013)\n \"\"\"\n # Squeeze singleton dimension so inputs + targets match\n if len(inputs.shape) == len(targets.shape) + 1:\n inputs = inputs.squeeze(-1)\n\n # Make sure tensor lengths match\n if len(inputs.shape) >= 2:\n inputs, targets = truncate(inputs, targets, allowed_len_diff)\n elif length is not None:\n raise ValueError(\"length can be passed only for >= 2D inputs.\")\n\n # Pass the loss function but apply reduction=\"none\" first\n loss = functools.partial(\n torch.nn.functional.binary_cross_entropy_with_logits,\n weight=weight,\n pos_weight=pos_weight,\n reduction=\"none\",\n )\n return compute_masked_loss(\n loss,\n inputs,\n targets.float(),\n length,\n label_smoothing=label_smoothing,\n reduction=reduction,\n )\n\n\ndef kldiv_loss(\n log_probabilities,\n targets,\n length=None,\n label_smoothing=0.0,\n allowed_len_diff=3,\n pad_idx=0,\n reduction=\"mean\",\n):\n \"\"\"Computes the KL-divergence error at the batch level.\n This loss applies label smoothing directly to the targets\n\n Arguments\n ---------\n probabilities : torch.Tensor\n The posterior probabilities of shape\n [batch, prob] or [batch, frames, prob].\n targets : torch.Tensor\n The targets, of shape [batch] or [batch, frames].\n length : torch.Tensor\n Length of each utterance, if frame-level loss is desired.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n reduction : str\n Options are 'mean', 'batch', 'batchmean', 'sum'.\n See pytorch for 'mean', 'sum'. The 'batch' option returns\n one loss per item in the batch, 'batchmean' returns sum / batch size.\n\n Example\n -------\n >>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9]])\n >>> kldiv_loss(torch.log(probs), torch.tensor([1, 1]))\n tensor(1.2040)\n \"\"\"\n if label_smoothing > 0:\n if log_probabilities.dim() == 2:\n log_probabilities = log_probabilities.unsqueeze(1)\n\n bz, time, n_class = log_probabilities.shape\n targets = targets.long().detach()\n\n confidence = 1 - label_smoothing\n\n log_probabilities = log_probabilities.view(-1, n_class)\n targets = targets.view(-1)\n with torch.no_grad():\n true_distribution = log_probabilities.clone()\n true_distribution.fill_(label_smoothing / (n_class - 1))\n ignore = targets == pad_idx\n targets = targets.masked_fill(ignore, 0)\n true_distribution.scatter_(1, targets.unsqueeze(1), confidence)\n\n loss = torch.nn.functional.kl_div(\n log_probabilities, true_distribution, reduction=\"none\"\n )\n loss = loss.masked_fill(ignore.unsqueeze(1), 0)\n\n # return loss according to reduction specified\n if reduction == \"mean\":\n return loss.sum().mean()\n elif reduction == \"batchmean\":\n return loss.sum() / bz\n elif reduction == \"batch\":\n return loss.view(bz, -1).sum(1) / length\n elif reduction == \"sum\":\n return loss.sum()\n else:\n return loss\n else:\n return nll_loss(log_probabilities, targets, length, reduction=reduction)\n\n\ndef truncate(predictions, targets, allowed_len_diff=3):\n \"\"\"Ensure that predictions and targets are the same length.\n\n Arguments\n ---------\n predictions : torch.Tensor\n First tensor for checking length.\n targets : torch.Tensor\n Second tensor for checking length.\n allowed_len_diff : int\n Length difference that will be tolerated before raising an exception.\n \"\"\"\n len_diff = 
predictions.shape[1] - targets.shape[1]\n if len_diff == 0:\n return predictions, targets\n elif abs(len_diff) > allowed_len_diff:\n raise ValueError(\n \"Predictions and targets should be same length, but got %s and \"\n \"%s respectively.\" % (predictions.shape[1], targets.shape[1])\n )\n elif len_diff < 0:\n return predictions, targets[:, : predictions.shape[1]]\n else:\n return predictions[:, : targets.shape[1]], targets\n\n\ndef compute_masked_loss(\n loss_fn,\n predictions,\n targets,\n length=None,\n label_smoothing=0.0,\n reduction=\"mean\",\n):\n \"\"\"Compute the true average loss of a set of waveforms of unequal length.\n\n Arguments\n ---------\n loss_fn : function\n A function for computing the loss taking just predictions and targets.\n Should return all the losses, not a reduction (e.g. reduction=\"none\").\n predictions : torch.Tensor\n First argument to loss function.\n targets : torch.Tensor\n Second argument to loss function.\n length : torch.Tensor\n Length of each utterance to compute mask. If None, global average is\n computed and returned.\n label_smoothing: float\n The proportion of label smoothing. Should only be used for NLL loss.\n Ref: Regularizing Neural Networks by Penalizing Confident Output\n Distributions. https://arxiv.org/abs/1701.06548\n reduction : str\n One of 'mean', 'batch', 'batchmean', 'none' where 'mean' returns a\n single value and 'batch' returns one per item in the batch and\n 'batchmean' is sum / batch_size and 'none' returns all.\n \"\"\"\n mask = torch.ones_like(targets)\n if length is not None:\n length_mask = length_to_mask(\n length * targets.shape[1], max_len=targets.shape[1],\n )\n\n # Handle any dimensionality of input\n while len(length_mask.shape) < len(mask.shape):\n length_mask = length_mask.unsqueeze(-1)\n length_mask = length_mask.type(mask.dtype)\n mask *= length_mask\n\n # Compute, then reduce loss\n loss = loss_fn(predictions, targets) * mask\n N = loss.size(0)\n if reduction == \"mean\":\n loss = loss.sum() / torch.sum(mask)\n elif reduction == \"batchmean\":\n loss = loss.sum() / N\n elif reduction == \"batch\":\n loss = loss.reshape(N, -1).sum(1) / mask.reshape(N, -1).sum(1)\n\n if label_smoothing == 0:\n return loss\n else:\n loss_reg = torch.mean(predictions, dim=1) * mask\n if reduction == \"mean\":\n loss_reg = torch.sum(loss_reg) / torch.sum(mask)\n elif reduction == \"batchmean\":\n loss_reg = torch.sum(loss_reg) / targets.shape[0]\n elif reduction == \"batch\":\n loss_reg = loss_reg.sum(1) / mask.sum(1)\n\n return -label_smoothing * loss_reg + (1 - label_smoothing) * loss\n\n\ndef get_si_snr_with_pitwrapper(source, estimate_source):\n \"\"\"This function wraps si_snr calculation with the speechbrain pit-wrapper.\n\n Arguments:\n ---------\n source: [B, T, C],\n Where B is the batch size, T is the length of the sources, C is\n the number of sources the ordering is made so that this loss is\n compatible with the class PitWrapper.\n\n estimate_source: [B, T, C]\n The estimated source.\n\n Example:\n ---------\n >>> x = torch.arange(600).reshape(3, 100, 2)\n >>> xhat = x[:, :, (1, 0)]\n >>> si_snr = -get_si_snr_with_pitwrapper(x, xhat)\n >>> print(si_snr)\n tensor([135.2284, 135.2284, 135.2284])\n \"\"\"\n\n pit_si_snr = PitWrapper(cal_si_snr)\n loss, perms = pit_si_snr(source, estimate_source)\n\n return loss\n\n\ndef cal_si_snr(source, estimate_source):\n \"\"\"Calculate SI-SNR.\n\n Arguments:\n ---------\n source: [T, B, C],\n Where B is batch size, T is the length of the sources, C is the number of sources\n the 
ordering is made so that this loss is compatible with the class PitWrapper.\n\n estimate_source: [T, B, C]\n The estimated source.\n\n Example:\n ---------\n >>> import numpy as np\n >>> x = torch.Tensor([[1, 0], [123, 45], [34, 5], [2312, 421]])\n >>> xhat = x[:, (1, 0)]\n >>> x = x.unsqueeze(-1).repeat(1, 1, 2)\n >>> xhat = xhat.unsqueeze(1).repeat(1, 2, 1)\n >>> si_snr = -cal_si_snr(x, xhat)\n >>> print(si_snr)\n tensor([[[ 25.2142, 144.1789],\n [130.9283, 25.2142]]])\n \"\"\"\n EPS = 1e-8\n assert source.size() == estimate_source.size()\n device = estimate_source.device.type\n\n source_lengths = torch.tensor(\n [estimate_source.shape[0]] * estimate_source.shape[1], device=device\n )\n mask = get_mask(source, source_lengths)\n estimate_source *= mask\n\n num_samples = (\n source_lengths.contiguous().reshape(1, -1, 1).float()\n ) # [1, B, 1]\n mean_target = torch.sum(source, dim=0, keepdim=True) / num_samples\n mean_estimate = (\n torch.sum(estimate_source, dim=0, keepdim=True) / num_samples\n )\n zero_mean_target = source - mean_target\n zero_mean_estimate = estimate_source - mean_estimate\n # mask padding position along T\n zero_mean_target *= mask\n zero_mean_estimate *= mask\n\n # Step 2. SI-SNR with PIT\n # reshape to use broadcast\n s_target = zero_mean_target # [T, B, C]\n s_estimate = zero_mean_estimate # [T, B, C]\n # s_target = <s', s>s / ||s||^2\n dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True) # [1, B, C]\n s_target_energy = (\n torch.sum(s_target ** 2, dim=0, keepdim=True) + EPS\n ) # [1, B, C]\n proj = dot * s_target / s_target_energy # [T, B, C]\n # e_noise = s' - s_target\n e_noise = s_estimate - proj # [T, B, C]\n # SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)\n si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (\n torch.sum(e_noise ** 2, dim=0) + EPS\n )\n si_snr = 10 * torch.log10(si_snr_beforelog + EPS) # [B, C]\n\n return -si_snr.unsqueeze(0)\n\n\ndef get_mask(source, source_lengths):\n \"\"\"\n Arguments\n ---------\n source : [T, B, C]\n source_lengths : [B]\n\n Returns\n -------\n mask : [T, B, 1]\n\n Example:\n ---------\n >>> source = torch.randn(4, 3, 2)\n >>> source_lengths = torch.Tensor([2, 1, 4]).int()\n >>> mask = get_mask(source, source_lengths)\n >>> print(mask)\n tensor([[[1.],\n [1.],\n [1.]],\n <BLANKLINE>\n [[1.],\n [0.],\n [1.]],\n <BLANKLINE>\n [[0.],\n [0.],\n [1.]],\n <BLANKLINE>\n [[0.],\n [0.],\n [1.]]])\n \"\"\"\n T, B, _ = source.size()\n mask = source.new_ones((T, B, 1))\n for i in range(B):\n mask[source_lengths[i] :, i, :] = 0\n return mask\n\n\nclass AngularMargin(nn.Module):\n \"\"\"\n An implementation of Angular Margin (AM) proposed in the following\n paper: '''Margin Matters: Towards More Discriminative Deep Neural Network\n Embeddings for Speaker Recognition''' (https://arxiv.org/abs/1906.07317)\n\n Arguments\n ---------\n margin : float\n The margin for cosine similiarity\n scale : float\n The scale for cosine similiarity\n\n Return\n ---------\n predictions : torch.Tensor\n\n Example\n -------\n >>> pred = AngularMargin()\n >>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])\n >>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] 
])\n >>> predictions = pred(outputs, targets)\n >>> predictions[:,0] > predictions[:,1]\n tensor([ True, False, True, False])\n \"\"\"\n\n def __init__(self, margin=0.0, scale=1.0):\n super(AngularMargin, self).__init__()\n self.margin = margin\n self.scale = scale\n\n def forward(self, outputs, targets):\n \"\"\"Compute AM between two tensors\n\n Arguments\n ---------\n outputs : torch.Tensor\n The outputs of shape [N, C], cosine similarity is required.\n targets : torch.Tensor\n The targets of shape [N, C], where the margin is applied for.\n\n Return\n ---------\n predictions : torch.Tensor\n \"\"\"\n outputs = outputs - self.margin * targets\n return self.scale * outputs\n\n\nclass AdditiveAngularMargin(AngularMargin):\n \"\"\"\n An implementation of Additive Angular Margin (AAM) proposed\n in the following paper: '''Margin Matters: Towards More Discriminative Deep\n Neural Network Embeddings for Speaker Recognition'''\n (https://arxiv.org/abs/1906.07317)\n\n Arguments\n ---------\n margin : float\n The margin for cosine similiarity.\n scale: float\n The scale for cosine similiarity.\n\n Returns\n -------\n predictions : torch.Tensor\n Tensor.\n Example\n -------\n >>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])\n >>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] ])\n >>> pred = AdditiveAngularMargin()\n >>> predictions = pred(outputs, targets)\n >>> predictions[:,0] > predictions[:,1]\n tensor([ True, False, True, False])\n \"\"\"\n\n def __init__(self, margin=0.0, scale=1.0, easy_margin=False):\n super(AdditiveAngularMargin, self).__init__(margin, scale)\n self.easy_margin = easy_margin\n\n self.cos_m = math.cos(self.margin)\n self.sin_m = math.sin(self.margin)\n self.th = math.cos(math.pi - self.margin)\n self.mm = math.sin(math.pi - self.margin) * self.margin\n\n def forward(self, outputs, targets):\n \"\"\"\n Compute AAM between two tensors\n\n Arguments\n ---------\n outputs : torch.Tensor\n The outputs of shape [N, C], cosine similarity is required.\n targets : torch.Tensor\n The targets of shape [N, C], where the margin is applied for.\n\n Return\n ---------\n predictions : torch.Tensor\n \"\"\"\n cosine = outputs.float()\n cosine = torch.clamp(cosine, -1 + 1e-7, 1 - 1e-7)\n sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)\n if self.easy_margin:\n phi = torch.where(cosine > 0, phi, cosine)\n else:\n phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n outputs = (targets * phi) + ((1.0 - targets) * cosine)\n return self.scale * outputs\n\n\nclass LogSoftmaxWrapper(nn.Module):\n \"\"\"\n Arguments\n ---------\n Returns\n ---------\n loss : torch.Tensor\n Learning loss\n predictions : torch.Tensor\n Log probabilities\n Example\n -------\n >>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])\n >>> outputs = outputs.unsqueeze(1)\n >>> targets = torch.tensor([ [0], [1], [0], [1] ])\n >>> log_prob = LogSoftmaxWrapper(nn.Identity())\n >>> loss = log_prob(outputs, targets)\n >>> 0 <= loss < 1\n tensor(True)\n >>> log_prob = LogSoftmaxWrapper(AngularMargin(margin=0.2, scale=32))\n >>> loss = log_prob(outputs, targets)\n >>> 0 <= loss < 1\n tensor(True)\n >>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])\n >>> log_prob = LogSoftmaxWrapper(AdditiveAngularMargin(margin=0.3, scale=32))\n >>> loss = log_prob(outputs, targets)\n >>> 0 <= loss < 1\n tensor(True)\n \"\"\"\n\n def __init__(self, loss_fn):\n super(LogSoftmaxWrapper, 
self).__init__()\n self.loss_fn = loss_fn\n self.criterion = torch.nn.KLDivLoss(reduction=\"sum\")\n\n def forward(self, outputs, targets, length=None):\n \"\"\"\n Arguments\n ---------\n outputs : torch.Tensor\n Network output tensor, of shape\n [batch, 1, outdim].\n targets : torch.Tensor\n Target tensor, of shape [batch, 1].\n\n Returns\n -------\n loss: torch.Tensor\n Loss for current examples.\n \"\"\"\n outputs = outputs.squeeze(1)\n targets = targets.squeeze(1)\n targets = F.one_hot(targets.long(), outputs.shape[1]).float()\n try:\n predictions = self.loss_fn(outputs, targets)\n except TypeError:\n predictions = self.loss_fn(outputs)\n\n predictions = F.log_softmax(predictions, dim=1)\n loss = self.criterion(predictions, targets) / targets.sum()\n return loss\n\n\ndef ctc_loss_kd(log_probs, targets, input_lens, blank_index, device):\n \"\"\"Knowledge distillation for CTC loss.\n\n Reference\n ---------\n Distilling Knowledge from Ensembles of Acoustic Models for Joint CTC-Attention End-to-End Speech Recognition.\n https://arxiv.org/abs/2005.09310\n\n Arguments\n ---------\n log_probs : torch.Tensor\n Predicted tensor from student model, of shape [batch, time, chars].\n targets : torch.Tensor\n Predicted tensor from single teacher model, of shape [batch, time, chars].\n input_lens : torch.Tensor\n Length of each utterance.\n blank_index : int\n The location of the blank symbol among the character indexes.\n device : str\n Device for computing.\n \"\"\"\n scores, predictions = torch.max(targets, dim=-1)\n\n pred_list = []\n pred_len_list = []\n for j in range(predictions.shape[0]):\n # Getting current predictions\n current_pred = predictions[j]\n\n actual_size = (input_lens[j] * log_probs.shape[1]).round().int()\n current_pred = current_pred[0:actual_size]\n current_pred = filter_ctc_output(\n list(current_pred.cpu().numpy()), blank_id=blank_index\n )\n current_pred_len = len(current_pred)\n pred_list.append(current_pred)\n pred_len_list.append(current_pred_len)\n\n max_pred_len = max(pred_len_list)\n for j in range(predictions.shape[0]):\n diff = max_pred_len - pred_len_list[j]\n for n in range(diff):\n pred_list[j].append(0)\n\n # generate soft label of teacher model\n fake_lab = torch.from_numpy(np.array(pred_list))\n fake_lab.to(device)\n fake_lab = fake_lab.int()\n fake_lab_lengths = torch.from_numpy(np.array(pred_len_list)).int()\n fake_lab_lengths.to(device)\n\n input_lens = (input_lens * log_probs.shape[1]).round().int()\n log_probs = log_probs.transpose(0, 1)\n return torch.nn.functional.ctc_loss(\n log_probs,\n fake_lab,\n input_lens,\n fake_lab_lengths,\n blank_index,\n zero_infinity=True,\n )\n\n\ndef ce_kd(inp, target):\n \"\"\"Simple version of distillation for cross-entropy loss.\n\n Arguments\n ---------\n inp : torch.Tensor\n The probabilities from student model, of shape [batch_size * length, feature]\n target : torch.Tensor\n The probabilities from teacher model, of shape [batch_size * length, feature]\n \"\"\"\n return (-target * inp).sum(1)\n\n\ndef nll_loss_kd(\n probabilities, targets, rel_lab_lengths,\n):\n \"\"\"Knowledge distillation for negative log-likelihood loss.\n\n Reference\n ---------\n Distilling Knowledge from Ensembles of Acoustic Models for Joint CTC-Attention End-to-End Speech Recognition.\n https://arxiv.org/abs/2005.09310\n\n Arguments\n ---------\n probabilities : torch.Tensor\n The predicted probabilities from the student model.\n Format is [batch, frames, p]\n targets : torch.Tensor\n The target probabilities from the teacher model.\n Format 
is [batch, frames, p]\n rel_lab_lengths : torch.Tensor\n Length of each utterance, if the frame-level loss is desired.\n\n Example\n -------\n >>> probabilities = torch.tensor([[[0.8, 0.2], [0.2, 0.8]]])\n >>> targets = torch.tensor([[[0.9, 0.1], [0.1, 0.9]]])\n >>> rel_lab_lengths = torch.tensor([1.])\n >>> nll_loss_kd(probabilities, targets, rel_lab_lengths)\n tensor(-0.7400)\n \"\"\"\n # Getting the number of sentences in the minibatch\n N_snt = probabilities.shape[0]\n\n # Getting the maximum length of label sequence\n max_len = probabilities.shape[1]\n\n # Getting the label lengths\n lab_lengths = torch.round(rel_lab_lengths * targets.shape[1]).int()\n\n # Reshape to [batch_size * length, feature]\n prob_curr = probabilities.reshape(N_snt * max_len, probabilities.shape[-1])\n\n # Generating mask\n mask = length_to_mask(\n lab_lengths, max_len=max_len, dtype=torch.float, device=prob_curr.device\n )\n\n # Reshape to [batch_size * length, feature]\n lab_curr = targets.reshape(N_snt * max_len, targets.shape[-1])\n\n loss = ce_kd(prob_curr, lab_curr)\n # Loss averaging\n loss = torch.sum(loss.reshape(N_snt, max_len) * mask) / torch.sum(mask)\n return loss\n" ]
[ [ "torch.nn.functional.kl_div", "torch.mean", "torch.max", "torch.sum", "torch.no_grad", "torch.where", "torch.log10", "torch.pow", "torch.round", "torch.tensor", "torch.ones_like", "torch.zeros_like", "torch.stack", "numpy.array", "torch.nn.functional.ctc_loss", "torch.nn.KLDivLoss", "torch.nn.functional.log_softmax", "torch.clamp", "torch.argmax" ] ]
ParsaHejabi/USC-CSCI561-FoundationsOfArtificialIntelligence
[ "b80564c7065f0a09e21ac40e3ced514ab3031384" ]
[ "HW2/resource/startercode/Board.py" ]
[ "import numpy as np\n\nBOARD_SIZE = 3\n\nONGOING = -1\nDRAW = 0\nX_WIN = 1\nO_WIN = 2\n\n\nclass Board:\n\n def __init__(self, state=None, show_board=False, show_result=False):\n \"\"\" board cell:\n Empty -> 0\n X -> 1\n O -> 2\n \"\"\"\n if state is None:\n self.state = np.zeros((BOARD_SIZE, BOARD_SIZE), dtype=np.int)\n else:\n self.state = state.copy()\n self.game_result = ONGOING\n self.show_board = show_board\n self.show_result = show_result\n\n def set_show_board(self, show_board):\n self.show_board = show_board\n\n def encode_state(self):\n \"\"\" Encode the current state of the board as a string\n \"\"\"\n return ''.join([str(self.state[i][j]) for i in range(BOARD_SIZE) for j in range(BOARD_SIZE)])\n\n def reset(self):\n self.state.fill(0)\n self.game_result = ONGOING\n\n def is_valid_move(self, row, col):\n return row < BOARD_SIZE and row >= 0 and col < BOARD_SIZE and col >=0 and self.state[row][col] == 0\n\n def move(self, row, col, player):\n \"\"\"\n Parameters\n ----------\n row : 0, 1, 2\n col : 0, 1, 2\n player: X -> 1, O -> 2\n\n Returns\n -------\n state: state after the move\n result: game result after the move\n \"\"\"\n if not self.is_valid_move(row, col):\n print (row, col)\n self.print_board()\n raise ValueError(\"Invalid Move\")\n\n self.state[row][col] = player\n self.game_result = self._check_winner()\n\n if self.show_board:\n p = 'X' if player == 1 else 'O'\n print('player {} moved: {}, {}'.format(p, row, col))\n self.print_board()\n\n if self.show_result:\n self.game_result_report()\n\n return self.state, self.game_result\n\n def game_over(self):\n return self.game_result != ONGOING\n\n\n def print_board(self):\n board = self.encode_state()\n board = board.replace('0', ' ')\n board = board.replace('1', 'X')\n board = board.replace('2', 'O')\n print(' ' + board[0] + ' | ' + board[1] + ' | ' + board[2])\n print('--- --- ---')\n print(' ' + board[3] + ' | ' + board[4] + ' | ' + board[5])\n print('--- --- ---')\n print(' ' + board[6] + ' | ' + board[7] + ' | ' + board[8])\n print()\n\n def game_result_report(self):\n if self.game_result is ONGOING:\n return\n print ('=' * 30)\n if self.game_result is DRAW:\n print ('Game Over : Draw'.center(30))\n elif self.game_result is X_WIN:\n print ('Game Over : Winner X'.center(30))\n elif self.game_result is O_WIN:\n print ('Game Over : Winner O'.center(30))\n print ('=' * 30)\n\n def _check_winner(self):\n # check each row and column\n for i in range(0, 3):\n if self.state[i][0] > 0 and self.state[i][0] == self.state[i][1] and self.state[i][1] == self.state[i][2]:\n return X_WIN if self.state[i][0] == 1 else O_WIN\n if self.state[0][i] > 0 and self.state[0][i] == self.state[1][i] and self.state[1][i] == self.state[2][i]:\n return X_WIN if self.state[0][i] == 1 else O_WIN\n\n # check diagonal\n if self.state[1][1] > 0 and self.state[0][0] == self.state[1][1] and self.state[1][1] == self.state[2][2]:\n return X_WIN if self.state[1][1] == 1 else O_WIN\n if self.state[1][1] > 0 and self.state[2][0] == self.state[1][1] and self.state[1][1] == self.state[0][2]:\n return X_WIN if self.state[1][1] == 1 else O_WIN\n\n # draw\n if (self.state == 0).sum() == 0:\n return DRAW\n\n return ONGOING\n\n\nif __name__ == \"__main__\":\n board = Board()\n board.move(0, 0, 1)\n board.move(0, 1, 1)\n board.move(2, 2, 2)\n board.move(2, 1, 2)\n print()\n print(board.state)\n print()\n board.print_board()\n\n" ]
[ [ "numpy.zeros" ] ]
todnewman/pyclasses
[ "94126f92ce49e3b7d5c67656bd28e519fed11579" ]
[ "class_LR.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 15 13:23:53 2020\r\n\r\n@author: rat9289\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\n\r\n\r\nclass Metrics:\r\n \"\"\"\r\n Methods for computing useful regression metrics\r\n \r\n sse: Sum of squared errors\r\n sst: Total sum of squared errors (actual vs avg(actual))\r\n r_squared: Regression coefficient (R^2)\r\n adj_r_squared: Adjusted R^2\r\n mse: Mean sum of squared errors\r\n AIC: Akaike information criterion\r\n BIC: Bayesian information criterion\r\n \"\"\"\r\n\r\n def sse(self):\r\n \"\"\"Returns sum of squared errors (model vs. actual)\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n squared_errors = (self.resid_) ** 2\r\n self.sq_error_ = np.sum(squared_errors)\r\n return self.sq_error_\r\n\r\n def sst(self):\r\n \"\"\"Returns total sum of squared errors (actual vs avg(actual))\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n avg_y = np.mean(self.target_)\r\n squared_errors = (self.target_ - avg_y) ** 2\r\n self.sst_ = np.sum(squared_errors)\r\n return self.sst_\r\n\r\n def r_squared(self):\r\n \"\"\"Returns calculated value of r^2\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n self.r_sq_ = 1 - self.sse() / self.sst()\r\n return self.r_sq_\r\n\r\n def adj_r_squared(self):\r\n \"\"\"Returns calculated value of adjusted r^2\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n self.adj_r_sq_ = 1 - (self.sse() / self.dfe_) / (self.sst() / self.dft_)\r\n return self.adj_r_sq_\r\n\r\n def mse(self):\r\n \"\"\"Returns calculated value of mse\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n self.mse_ = np.mean((self.predict(self.features_) - self.target_) ** 2)\r\n return self.mse_\r\n\r\n def aic(self):\r\n \"\"\"\r\n Returns AIC (Akaike information criterion)\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return lm.aic\r\n\r\n def bic(self):\r\n \"\"\"\r\n Returns BIC (Bayesian information criterion)\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return lm.bic\r\n\r\n def print_metrics(self):\r\n \"\"\"Prints a report of the useful metrics for a given model object\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n items = (\r\n (\"sse:\", self.sse()),\r\n (\"sst:\", self.sst()),\r\n (\"mse:\", self.mse()),\r\n (\"r^2:\", self.r_squared()),\r\n (\"adj_r^2:\", self.adj_r_squared()),\r\n (\"AIC:\", self.aic()),\r\n (\"BIC:\", self.bic()),\r\n )\r\n for item in items:\r\n print(\"{0:8} {1:.4f}\".format(item[0], item[1]))\r\n\r\n def summary_metrics(self):\r\n \"\"\"Returns a dictionary of the useful metrics\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n metrics = {}\r\n items = (\r\n (\"sse\", self.sse()),\r\n (\"sst\", self.sst()),\r\n (\"mse\", self.mse()),\r\n (\"r^2\", self.r_squared()),\r\n (\"adj_r^2:\", self.adj_r_squared()),\r\n (\"AIC:\", self.aic()),\r\n (\"BIC:\", self.bic()),\r\n )\r\n for item in items:\r\n metrics[item[0]] = item[1]\r\n return metrics\r\n\r\n\r\nclass Inference:\r\n \"\"\"\r\n Inferential statistics: \r\n standard error, \r\n 
p-values\r\n t-test statistics\r\n F-statistics and p-value of F-test\r\n \"\"\"\r\n\r\n def __init__():\r\n pass\r\n\r\n def std_err(self):\r\n \"\"\"\r\n Returns standard error values of the features\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return lm.bse\r\n\r\n def pvalues(self):\r\n \"\"\"\r\n Returns p-values of the features\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return lm.pvalues\r\n\r\n def tvalues(self):\r\n \"\"\"\r\n Returns t-test values of the features\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return lm.tvalues\r\n\r\n def ftest(self):\r\n \"\"\"\r\n Returns the F-statistic of the overall regression and corresponding p-value\r\n \"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n return (lm.fvalue, lm.f_pvalue)\r\n\r\n\r\nclass Diagnostics_plots:\r\n \"\"\"\r\n Diagnostics plots and methods\r\n \r\n Arguments:\r\n fitted_vs_residual: Plots fitted values vs. residuals\r\n fitted_vs_features: Plots residuals vs all feature variables in a grid\r\n histogram_resid: Plots a histogram of the residuals (can be normalized)\r\n shapiro_test: Performs Shapiro-Wilk normality test on the residuals\r\n qqplot_resid: Creates a quantile-quantile plot for residuals comparing with a normal distribution \r\n \"\"\"\r\n\r\n def __init__():\r\n pass\r\n\r\n def fitted_vs_residual(self):\r\n \"\"\"Plots fitted values vs. residuals\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n fig, ax = plt.subplots(figsize =(15,9))\r\n plt.title(\"Fitted vs. residuals plot\", fontsize=14)\r\n plt.scatter(self.fitted_, self.resid_, edgecolor=\"k\")\r\n plt.hlines(\r\n y=0,\r\n xmin=np.amin(self.fitted_),\r\n xmax=np.amax(self.fitted_),\r\n color=\"k\",\r\n linestyle=\"dashed\",\r\n )\r\n plt.xlabel(\"Fitted values\")\r\n plt.ylabel(\"Residuals\")\r\n plt.show()\r\n\r\n def fitted_vs_features(self):\r\n \"\"\"Plots residuals vs all feature variables in a grid\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n num_plots = self.features_.shape[1]\r\n if num_plots % 3 == 0:\r\n nrows = int(num_plots / 3)\r\n else:\r\n nrows = int(num_plots / 3) + 1\r\n ncols = 3\r\n fig, ax = plt.subplots(nrows, ncols, figsize=(15, nrows * 3.5))\r\n axes = ax.ravel()\r\n for i in range(num_plots, nrows * ncols):\r\n axes[i].set_visible(False)\r\n for i in range(num_plots):\r\n axes[i].scatter(\r\n self.features_.T[i],\r\n self.resid_,\r\n color=\"orange\",\r\n edgecolor=\"k\",\r\n alpha=0.8,\r\n )\r\n axes[i].grid(True)\r\n #axes[i].set_xlabel(\"Feature X[{}]\".format(i))\r\n axes[i].set_xlabel(f\"{self.feature_names[i]}\")\r\n axes[i].set_ylabel(\"Residuals\")\r\n axes[i].hlines(\r\n y=0,\r\n xmin=np.amin(self.features_.T[i]),\r\n xmax=np.amax(self.features_.T[i]),\r\n color=\"k\",\r\n linestyle=\"dashed\",\r\n )\r\n plt.title(\"Fitted vs. 
Features plot\", fontsize=14)\r\n plt.show()\r\n\r\n def histogram_resid(self, normalized=True):\r\n \"\"\"Plots a histogram of the residuals (can be normalized)\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n if normalized:\r\n norm_r = self.resid_ / np.linalg.norm(self.resid_)\r\n else:\r\n norm_r = self.resid_\r\n num_bins = min(20, int(np.sqrt(self.features_.shape[0])))\r\n fig, ax = plt.subplots(figsize =(15,9))\r\n plt.title(\"Histogram of the normalized residuals\")\r\n plt.hist(norm_r, bins=num_bins, edgecolor=\"k\")\r\n plt.xlabel(\"Normalized residuals\")\r\n plt.ylabel(\"Count\")\r\n plt.show()\r\n\r\n def shapiro_test(self, normalized=True):\r\n \"\"\"Performs Shapiro-Wilk normality test on the residuals\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n from scipy.stats import shapiro\r\n\r\n if normalized:\r\n norm_r = self.resid_ / np.linalg.norm(self.resid_)\r\n else:\r\n norm_r = self.resid_\r\n _, p = shapiro(norm_r)\r\n if p > 0.01:\r\n print(\"Shapiro Test: The residuals seem to have come from a Gaussian process\")\r\n else:\r\n print(\r\n \"Shapiro Test: The residuals does not seem to have come from a Gaussian process.\\nNormality assumptions of the linear regression may have been violated.\"\r\n )\r\n\r\n def qqplot_resid(self, normalized=True):\r\n \"\"\"Creates a quantile-quantile plot for residuals comparing with a normal distribution\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n from scipy.stats import probplot\r\n\r\n if normalized:\r\n norm_r = self.resid_ / np.linalg.norm(self.resid_)\r\n else:\r\n norm_r = self.resid_\r\n fig, ax = plt.subplots(figsize =(15,9))\r\n \r\n probplot(norm_r, dist=\"norm\", plot=plt)\r\n plt.title(\"Q-Q plot of the normalized residuals\", fontsize=14)\r\n plt.xlabel(\"Theoretical quantiles\")\r\n plt.ylabel(\"Residual quantiles\")\r\n plt.show()\r\n\r\n\r\nclass Data_plots:\r\n \"\"\"\r\n Methods for data related plots\r\n \r\n pairplot: Creates pairplot of all variables and the target\r\n plot_fitted: Plots fitted values against the true output values from the data\r\n \"\"\"\r\n\r\n def __init__():\r\n pass\r\n\r\n def pairplot(self):\r\n \"\"\"Creates pairplot of all variables and the target using the Seaborn library\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n\r\n print(\"This may take a little time. Have patience...\")\r\n from seaborn import pairplot\r\n from pandas import DataFrame\r\n df = DataFrame(np.hstack((self.features_, (self.target_).values.reshape(-1, 1))))\r\n pairplot(df)\r\n plt.show()\r\n\r\n\r\n\r\nclass Outliers:\r\n \"\"\"\r\n Methods for plotting outliers, leverage, influence points\r\n \r\n cook_distance: Computes and plots Cook's distance\r\n influence_plot: Creates the influence plot\r\n leverage_resid_plot: Plots leverage vs normalized residuals' square\r\n \"\"\"\r\n\r\n def __init__():\r\n pass\r\n\r\n def cook_distance(self):\r\n \"\"\"Computes and plots Cook's distance. 
Cook's distance can be used \r\n in several ways: to indicate influential data points that are \r\n particularly worth checking for validity; or to indicate regions of \r\n the design space where it would be good to be able to obtain more \r\n data points\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n import statsmodels.api as sm\r\n from statsmodels.stats.outliers_influence import OLSInfluence as influence\r\n\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n inf = influence(lm)\r\n (c, p) = inf.cooks_distance\r\n plt.figure(figsize=(15, 8))\r\n plt.title(\"Cook's distance plot for the residuals\", fontsize=14)\r\n plt.stem(np.arange(len(c)), c, markerfmt=\",\", use_line_collection=True)\r\n plt.xlabel('Record ID Number', fontsize=12)\r\n plt.ylabel(\"Cook's Number\", fontsize=12)\r\n plt.grid(True)\r\n plt.show()\r\n\r\n def influence_plot(self, **kwargs):\r\n \"\"\"Creates the influence plot. Influence plots show the (externally) \r\n studentized residuals vs. the leverage of each observation as measured\r\n by the hat matrix\r\n \r\n Residuals help to locate sample outliers whose responses do not follow \r\n the general trend of the rest of the data.\r\n \r\n Leverage, on the other hand, helps to locate sample outliers in terms \r\n of our independent variables. High leverage observations, those with \r\n leverage > 2k/n , are observations where the independent variables are far \r\n from other observations.\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n import statsmodels.api as sm\r\n\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n fig, ax = plt.subplots(figsize=(15, 9))\r\n fig = sm.graphics.influence_plot(lm, ax=ax, criterion=\"cooks\", **kwargs)\r\n plt.title('Influence Plot', fontsize=14)\r\n plt.legend(loc=\"upper left\")\r\n plt.show()\r\n\r\n def leverage_resid_plot(self):\r\n \"\"\"Plots leverage vs normalized residuals' square\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n import statsmodels.api as sm\r\n \r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n fig, ax = plt.subplots(figsize=(15, 9))\r\n fig = sm.graphics.plot_leverage_resid2(lm, ax=ax, label=\"Record ID\")\r\n plt.title('Leverage Residual Plot', fontsize=14)\r\n plt.legend(loc=\"upper right\")\r\n plt.show()\r\n\r\n\r\nclass Multicollinearity:\r\n \"\"\"\r\n Methods for checking multicollinearity in the dataset features\r\n \r\n vif:Computes variance influence factors for each feature variable\r\n \"\"\"\r\n\r\n def __init__():\r\n pass\r\n\r\n def vif(self):\r\n \"\"\"Computes variance influence factors for each feature variable\"\"\"\r\n if not self.is_fitted:\r\n print(\"Model not fitted yet!\")\r\n return None\r\n import statsmodels.api as sm\r\n from statsmodels.stats.outliers_influence import (\r\n variance_inflation_factor as vif,\r\n )\r\n\r\n lm = sm.OLS(self.target_, sm.add_constant(self.features_)).fit()\r\n for i in range(self.features_.shape[1]):\r\n v = vif(np.matrix(self.features_), i)\r\n print(\"Variance inflation factor for feature {}: {}\".format(i, round(v, 2)))\r\n\r\n\r\nclass MyLinearRegression(\r\n Metrics, Diagnostics_plots, Data_plots, Outliers, Multicollinearity, Inference\r\n):\r\n def __init__(self, fit_intercept=True, **kwargs):\r\n self.coef_ = None\r\n self.intercept_ = None\r\n self.fit_intercept_ = fit_intercept\r\n self.is_fitted = False\r\n self.features_ = None # This is the 
predictive X parameters \r\n self.target_ = None # This is the y parameter (target, label, etc.)\r\n self.feature_names = kwargs.get('feature_names')\r\n self.record_id = kwargs.get('record_id')\r\n self.model = None\r\n\r\n def __repr__(self):\r\n return \"Class containing lots of Linear Regression tools\"\r\n\r\n def ingest_data(self, X, y):\r\n \"\"\"\r\n Ingests the given data\r\n \r\n Arguments:\r\n X: 1D or 2D numpy array \r\n y: 1D numpy array\r\n \"\"\"\r\n # check if X is 1D or 2D array\r\n if len(X.shape) == 1:\r\n X = X.reshape(-1, 1)\r\n\r\n # features and data\r\n self.features_ = X\r\n self.target_ = y\r\n \r\n\r\n def fit(self, X,y, fit_intercept_=True):\r\n \"\"\"\r\n Fit model coefficients.\r\n Arguments:\r\n X: 1D or 2D numpy array \r\n y: 1D numpy array\r\n \"\"\"\r\n '''if X.all() != None:\r\n if len(X.shape) == 1:\r\n X = X.reshape(-1, 1)\r\n self.features_ = X\r\n if y != None:\r\n self.target_ = y'''\r\n \r\n from sklearn.linear_model import LinearRegression\r\n from sklearn.neural_network import MLPRegressor\r\n \r\n self.model = MLPRegressor()\r\n \r\n # check if X is 1D or 2D array\r\n if len(X.shape) == 1:\r\n X = X.reshape(-1,1)\r\n \r\n # features and data\r\n self.features_ = X\r\n self.target_ = y\r\n\r\n # degrees of freedom of population dependent variable variance\r\n self.dft_ = self.features_.shape[0] - 1\r\n # degrees of freedom of population error variance\r\n self.dfe_ = self.features_.shape[0] - self.features_.shape[1] - 1\r\n\r\n # add bias if fit_intercept is True\r\n if self.fit_intercept_:\r\n X_biased = np.c_[np.ones(self.features_.shape[0]), self.features_]\r\n else:\r\n X_biased = self.features_\r\n # Assign target_ to a local variable y\r\n y = self.target_\r\n\r\n # closed form solution\r\n xTx = np.dot(X_biased.T, X_biased)\r\n inverse_xTx = np.linalg.inv(xTx)\r\n xTy = np.dot(X_biased.T, y)\r\n coef = np.dot(inverse_xTx, xTy)\r\n\r\n # set attributes\r\n if self.fit_intercept_:\r\n self.intercept_ = coef[0]\r\n self.coef_ = coef[1:]\r\n else:\r\n self.intercept_ = 0\r\n self.coef_ = coef\r\n\r\n # Predicted/fitted y\r\n #self.fitted_ = np.dot(self.features_, self.coef_) + self.intercept_\r\n self.model.fit(self.features_, self.target_)\r\n self.fitted_ = self.model.predict(self.features_)\r\n # Residuals\r\n residuals = self.target_ - self.fitted_\r\n self.resid_ = residuals\r\n\r\n # Set is_fitted to True\r\n self.is_fitted = True\r\n\r\n def fit_dataframe(self, X, y, dataframe, fit_intercept_=True):\r\n \"\"\"\r\n Fit model coefficients from a Pandas DataFrame.\r\n \r\n Arguments:\r\n X: A list of columns of the dataframe acting as features. 
Must be only numerical.\r\n y: Name of the column of the dataframe acting as the target\r\n fit_intercept: Boolean, whether an intercept term will be included in the fit\r\n \"\"\"\r\n\r\n assert (\r\n type(X) == list\r\n ), \"X must be a list of the names of the numerical feature/predictor columns\"\r\n assert (\r\n type(y) == str\r\n ), \"y must be a string - name of the column you want as target\"\r\n\r\n self.features_ = np.array(dataframe[X])\r\n self.target_ = np.array(dataframe[y])\r\n\r\n # degrees of freedom of population dependent variable variance\r\n self.dft_ = self.features_.shape[0] - 1\r\n # degrees of freedom of population error variance\r\n self.dfe_ = self.features_.shape[0] - self.features_.shape[1] - 1\r\n\r\n # add bias if fit_intercept is True\r\n if self.fit_intercept_:\r\n X_biased = np.c_[np.ones(self.features_.shape[0]), self.features_]\r\n else:\r\n X_biased = self.features_\r\n # Assign target_ to a local variable y\r\n y = self.target_\r\n\r\n # closed form solution\r\n xTx = np.dot(X_biased.T, X_biased)\r\n inverse_xTx = np.linalg.inv(xTx)\r\n xTy = np.dot(X_biased.T, y)\r\n coef = np.dot(inverse_xTx, xTy)\r\n\r\n # set attributes\r\n if self.fit_intercept_:\r\n self.intercept_ = coef[0]\r\n self.coef_ = coef[1:]\r\n else:\r\n self.intercept_ = 0\r\n self.coef_ = coef\r\n\r\n # Predicted/fitted y\r\n self.fitted_ = np.dot(self.features_, self.coef_) + self.intercept_\r\n\r\n # Residuals\r\n residuals = self.target_ - self.fitted_\r\n self.resid_ = residuals\r\n\r\n # Set is_fitted to True\r\n self.is_fitted = True\r\n\r\n def predict(self, X):\r\n \"\"\"Output model prediction.\r\n Arguments:\r\n X: 1D or 2D numpy array\r\n \"\"\"\r\n # check if X is 1D or 2D array\r\n if len(X.shape) == 1:\r\n X = X.reshape(-1, 1)\r\n self.predicted_ = self.model.predict(X)\r\n #self.predicted_ = self.intercept_ + np.dot(X, self.coef_)\r\n return self.predicted_\r\n\r\n def run_diagnostics(self):\r\n \"\"\"Runs diagnostics tests and plots\"\"\"\r\n Diagnostics_plots.fitted_vs_residual(self)\r\n print()\r\n print('Fitted Data to Features Plots')\r\n Diagnostics_plots.fitted_vs_features(self)\r\n Diagnostics_plots.histogram_resid(self)\r\n Diagnostics_plots.qqplot_resid(self)\r\n print()\r\n Diagnostics_plots.shapiro_test(self)\r\n\r\n def outlier_plots(self):\r\n \"\"\"Creates various outlier plots\"\"\"\r\n \r\n Outliers.cook_distance(self)\r\n Outliers.influence_plot(self, label=\"Record ID\")\r\n Outliers.leverage_resid_plot(self)\r\n \r\n def plot_fitted(self,X,y,reference_line=False):\r\n \"\"\"\r\n Plots fitted values against the true output values from the data\r\n \r\n Arguments:\r\n reference_line: A Boolean switch to draw a 45-degree reference line on the plot\r\n \"\"\"\r\n plt.figure(figsize=(15, 8))\r\n plt.title(\"True vs. fitted values\",fontsize=14)\r\n plt.scatter(y,self.fitted_,s=100,alpha=0.75,color='red',edgecolor='k')\r\n if reference_line:\r\n plt.plot(y,y,c='k',linestyle='dotted')\r\n plt.xlabel(\"True values\")\r\n plt.ylabel(\"Fitted values\")\r\n plt.grid(True)\r\n plt.show()\r\n \r\n " ]
[ [ "numpy.matrix", "matplotlib.pyplot.legend", "numpy.dot", "numpy.amax", "numpy.sqrt", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.linalg.inv", "numpy.amin", "scipy.stats.probplot", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.subplots", "numpy.linalg.norm", "numpy.ones", "scipy.stats.shapiro", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "sklearn.neural_network.MLPRegressor" ] ]
JiaheZhang/HazDesNet
[ "1f3fed86f21c13e4ba264e728b70b86cf87df710" ]
[ "eval.py" ]
[ "import cv2\nimport numpy as np\nimport scipy.io as scio\nimport pandas as pd\n\nimport model \n\nif __name__ == \"__main__\":\n HazDesNet = model.load_HazDesNet()\n HazDesNet.summary()\n\n y_pred = np.zeros((100, 1))\n\n data = scio.loadmat('./dataset/LIVE_Defogging/gt.mat')\n y_true = data['subjective_study_mean'].T\n\n for k in range(100):\n data_file = './dataset/LIVE_Defogging/%d.bmp' % (k + 1)\n img_test = cv2.imread(data_file)\n\n img_test = np.expand_dims(img_test, axis=0)\n \n y_temp = HazDesNet.predict(img_test)\n\n y_pred[k, 0] = np.mean(y_temp)\n\n df = pd.DataFrame({'true': y_true[:, 0], 'pred': y_pred[:, 0]})\n\n print(\"SROCC: %.4f\" % df.corr('spearman').ix[[0]].values[0][1])\n print(\"PLCC: %.4f\" % df.corr('pearson').ix[[0]].values[0][1])\n\n\n " ]
[ [ "numpy.expand_dims", "scipy.io.loadmat", "pandas.DataFrame", "numpy.mean", "numpy.zeros" ] ]
kavenbc/MLExamples
[ "e6e0eb3bf6d8e416e6d13f90b4b9f670d4676e0d" ]
[ "cleandata.py" ]
[ "import os\nfrom os.path import isfile, join\nimport pandas as pd\n\ncol_names = ['id','member_id','loan_amnt','funded_amnt','funded_amnt_inv','term','int_rate','installment',\n 'grade','sub_grade','emp_title','emp_length','home_ownership','annual_inc','verification_status',\n 'issue_d','loan_status','pymnt_plan','url','desc','purpose','title','zip_code',\n 'addr_state','dti','delinq_2yrs','earliest_cr_line','fico_range_low','fico_range_high',\n 'inq_last_6mths','mths_since_last_delinq','mths_since_last_record','open_acc','pub_rec','revol_bal',\n 'revol_util','total_acc','initial_list_status','out_prncp','out_prncp_inv','total_pymnt',\n 'total_pymnt_inv','total_rec_prncp','total_rec_int','total_rec_late_fee','recoveries',\n 'collection_recovery_fee','last_pymnt_d','last_pymnt_amnt','next_pymnt_d','last_credit_pull_d',\n 'last_fico_range_high','last_fico_range_low','collections_12_mths_ex_med','mths_since_last_major_derog',\n 'policy_code','application_type','annual_inc_joint','dti_joint','verification_status_joint',\n 'acc_now_delinq','tot_coll_amt','tot_cur_bal','open_acc_6m','open_act_il','open_il_12m',\n 'open_il_24m','mths_since_rcnt_il','total_bal_il','il_util','open_rv_12m','open_rv_24m','max_bal_bc',\n 'all_util','total_rev_hi_lim','inq_fi','total_cu_tl','inq_last_12m','acc_open_past_24mths',\n 'avg_cur_bal','bc_open_to_buy','bc_util','chargeoff_within_12_mths','delinq_amnt',\n 'mo_sin_old_il_acct','mo_sin_old_rev_tl_op','mo_sin_rcnt_rev_tl_op','mo_sin_rcnt_tl','mort_acc',\n 'mths_since_recent_bc','mths_since_recent_bc_dlq','mths_since_recent_inq','mths_since_recent_revol_delinq',\n 'num_accts_ever_120_pd','num_actv_bc_tl','num_actv_rev_tl','num_bc_sats','num_bc_tl','num_il_tl',\n 'num_op_rev_tl','num_rev_accts','num_rev_tl_bal_gt_0','num_sats','num_tl_120dpd_2m','num_tl_30dpd',\n 'num_tl_90g_dpd_24m','num_tl_op_past_12m','pct_tl_nvr_dlq','percent_bc_gt_75','pub_rec_bankruptcies',\n 'tax_liens','tot_hi_cred_lim','total_bal_ex_mort','total_bc_limit','total_il_high_credit_limit',\n 'revol_bal_joint','sec_app_fico_range_low','sec_app_fico_range_high','sec_app_earliest_cr_line',\n 'sec_app_inq_last_6mths','sec_app_mort_acc','sec_app_open_acc','sec_app_revol_util','sec_app_open_act_il',\n 'sec_app_num_rev_accts','sec_app_chargeoff_within_12_mths','sec_app_collections_12_mths_ex_med',\n 'sec_app_mths_since_last_major_derog','hardship_flag','hardship_type','hardship_reason','hardship_status',\n 'deferral_term','hardship_amount','hardship_start_date','hardship_end_date','payment_plan_start_date',\n 'hardship_length','hardship_dpd','hardship_loan_status','orig_projected_additional_accrued_interest',\n 'hardship_payoff_balance_amount','hardship_last_payment_amount','disbursement_method','debt_settlement_flag',\n 'debt_settlement_flag_date','settlement_status','settlement_date','settlement_amount','settlement_percentage','settlement_term']\n\ndrop_col_name = ['member_id','grade','emp_title','pymnt_plan','desc','title','url','next_pymnt_d','policy_code',\n 'last_pymnt_d','last_pymnt_amnt','next_pymnt_d', 'last_credit_pull_d', 'funded_amnt', 'funded_amnt_inv',\n 'initial_list_status','total_pymnt','total_pymnt_inv','total_rec_late_fee','annual_inc_joint',\n 'dti_joint','revol_bal_joint','hardship_flag','hardship_type','hardship_reason','hardship_status',\n 'deferral_term','hardship_amount','hardship_start_date','hardship_end_date','payment_plan_start_date',\n 'hardship_length','hardship_dpd','hardship_loan_status','orig_projected_additional_accrued_interest',\n 
'hardship_payoff_balance_amount','hardship_last_payment_amount','disbursement_method','debt_settlement_flag',\n 'debt_settlement_flag_date','settlement_status','settlement_date','settlement_amount','settlement_percentage',\n 'settlement_term']\n\nmonths_dic = {\"Jan\" : 1, \"Feb\" : 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6, \"Jul\" : 7, \"Aug\" : 8, \"Sep\": 9,\n \"Oct\" : 10, \"Nov\" : 11, \"Dec\" : 12}\n\ndef intstring(s):\n iStr = 0\n for item in s:\n iStr = iStr * 62 # 62 = 26 lowercase + 26 uppercase + 10 digits\n iStr = iStr + ord(item)\n\n return iStr\n\n\ndef handleYears(istr):\n if not isinstance(istr, str):\n return 0\n istr = istr.replace(\" years\", \"\")\n if \"<\" in istr:\n istr = \"0\"\n elif \"+\" in istr:\n istr = \"11\"\n\n return istr\n\n\ndef homeownership(s):\n if \"RENT\" in s:\n return 1\n elif \"OWN\" in s:\n return 2\n elif \"MORTGAGE\" in s:\n return 3\n elif \"OTHER\" in s:\n return 4\n\n\ndef verificationstatus(s):\n if \"Not\" in s:\n return 1\n elif \"Source\" in s:\n return 2\n else:\n return 3\n\n\ndef loanstatus(s):\n if \"Full\" in s:\n return 1\n elif \"Current\" in s:\n return 2\n elif \"30\" in s:\n return 3\n else:\n return 4\n\n\ndef purpose(s):\n return len(s) + intstring(s[0:3])\n\n\ndef yearsmonth(s):\n if not isinstance(s, str):\n return 0\n\n ss = s.split(\"-\")\n\n return ss[1]+str(months_dic[ss[0]])\n\n\ndef yearcount(s):\n if not isinstance(s, str):\n return 0\n\n ss = s.split(\"-\")\n\n return int(ss[1])-1900\n\n\ndef applicationtype(s):\n if not isinstance(s, str):\n return 0\n\n if \"Individual\" in s:\n return 1\n else:\n return 2\n\n\ndef parseCSV(file_path, filepoint):\n df = pd.read_csv(file_path, names=col_names, skipinitialspace=True)\n irow = df.shape[0]\n\n #Remove space\n for col in df:\n df[col] = df[col].str.strip()\n\n #Delete first 2 and last 2 rows\n df = df.drop(df.index[[0,1,irow - 2, irow-1]])\n\n #Modify terms to month number only\n df['term'] = df['term'].str.split(' ').str[0]\n\n #modify rate to remove %\n df['int_rate'] = df['int_rate'].str.replace('%', '')\n\n #modify sub_grade with number\n df['sub_grade'] = df['sub_grade'].apply(intstring)\n\n #Modify emp_length with year only\n df['emp_length'] = df['emp_length'].apply(handleYears)\n\n #Modify Home Ownership : RENT - 1, OWN - 2, MORTGAGE - 3, OTHER - 4\n df['home_ownership'] = df['home_ownership'].apply(homeownership)\n\n #Update verification status: Not Verified - 1, Source Verified - 2, Verified - 3\n df['verification_status'] = df['verification_status'].apply(verificationstatus)\n\n #Update issue date\n df['issue_d'] = df['issue_d'].apply(yearsmonth)\n\n #Update loan status\n df['loan_status'] = df['loan_status'].apply(loanstatus)\n\n #Update purpose\n df['purpose'] = df['purpose'].apply(purpose)\n\n #Update Zipcode\n df['zip_code'] = df['zip_code'].str.replace(\"xx\",\"\")\n\n #Update addr_state\n df['addr_state'] = df['addr_state'].apply(intstring)\n\n #Update earliest_cr_line\n df['earliest_cr_line'] = df['earliest_cr_line'].apply(yearcount)\n\n #Update revol_util\n df['revol_util'] = df['revol_util'].str.replace(\"%\",\"\")\n\n #Update Annual income, dti and revol_bal to merge individual and joint\n df.annual_inc = df.annual_inc_joint.where(df.application_type == 'Joint', df.annual_inc)\n df.dti = df.dti_joint.where(df.application_type == 'Joint', df.dti)\n df.revol_bal = df.revol_bal_joint.where(df.application_type == 'Joint', df.revol_bal)\n\n #Update Application Type\n df['application_type'] = df['application_type'].apply(applicationtype)\n\n #Delete unused column\n df = df.drop(columns=drop_col_name)\n\n #Handle missing data (NaN)\n df = df.fillna(0)\n\n #Save to CSV file\n df.to_csv(filepoint, mode=\"a\", index=False, header=False)\n print(irow, \"done\")\n\ndef joindata():\n data_path = join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n target_file = join(os.path.dirname(os.path.realpath(__file__)), \"data\", \"target.csv\")\n for f in os.listdir(data_path):\n data_file = join(data_path, f)\n if isfile(data_file):\n parseCSV(data_file, target_file)\n print(\"Complete\", data_file)\n\ndef dedup():\n target_file = join(os.path.dirname(os.path.realpath(__file__)), \"data\", \"target.csv\")\n filepoint = join(os.path.dirname(os.path.realpath(__file__)), \"data\", \"targetDedup.csv\")\n names = [x for x in col_names if x not in drop_col_name]\n df = pd.read_csv(target_file, names=names)\n\n df.drop_duplicates(subset=['id'], keep='last', inplace=True)\n\n df = df.drop(columns=['id'])\n df.to_csv(filepoint, mode=\"w\", index=False, header=False)\n" ]
[ [ "pandas.read_csv" ] ]
gyyang/bio-kvm
[ "1db65ec3250879d16b7906edf4833670d170ec9c" ]
[ "models/hopfield.py" ]
[ "\"\"\"Classical and modern Hopfield networks.\"\"\"\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\n\nrootpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(rootpath)\n\nclass ClassicHopfieldLayer(nn.Module):\n def __init__( self, input_size, output_size, batch_size=1, steps=1,\n clamp_val=1., decay_rate=0.9999, learning_rate=0.5,\n learn_params=False, zero_in_detach=True,\n take_sign_in_output=True):\n super().__init__()\n self.steps = steps\n self.input_size = input_size\n self.zero_in_detach = zero_in_detach\n self.clamp_val = clamp_val\n self.take_sign_in_output = take_sign_in_output\n\n if output_size is None:\n self.output_size = input_size\n else:\n self.output_size = output_size\n\n if self.input_size != self.output_size:\n self.diff_in_out_dim = True\n else:\n self.diff_in_out_dim = False\n\n self.batch_size = batch_size\n self.hebb_shape = (batch_size, self.output_size, self.input_size)\n self.register_buffer('hebb', torch.zeros(self.hebb_shape))\n\n if learn_params:\n self.lamb = nn.Parameter(torch.tensor(decay_rate))\n self.eta = nn.Parameter(torch.tensor(learning_rate))\n else:\n self.lamb = decay_rate\n self.eta = learning_rate\n\n def detach_hebb(self):\n if self.zero_in_detach:\n nn.init.zeros_(self.hebb)\n self.hebb.detach_()\n\n def reset_hebb(self):\n \"\"\"Reset hebb to zero weights.\"\"\"\n nn.init.zeros_(self.hebb)\n\n def update_hebb(self, pre, post, third_factor=None):\n \"\"\"Compute Hebbian updates.\n\n Update Hebb with function of pre and post\n\n Args:\n pre: (batch_size, in_features)\n post: (batch_size, out_features)\n third_factor: (batch_size, 1) or (batch_size, out_features, in_features)\n \"\"\"\n batch_size = pre.shape[0]\n deltahebb = torch.bmm(post.view(batch_size, self.output_size, 1),\n pre.view(batch_size, 1, self.input_size))\n\n self.hebb = self.hebb * self.lamb\n if self.batch_size > 1:\n self.hebb = self.hebb + self.eta*third_factor.squeeze()[:,None,None]*deltahebb\n else:\n self.hebb = self.hebb + self.eta*third_factor*deltahebb\n self.hebb = torch.clamp(self.hebb, min=-self.clamp_val,\n max=self.clamp_val)\n\n def forward(self, input):\n \"\"\"\n Args:\n input: tensor (batch_size, input_features)\n Return:\n output: tensor (batch_size, output_features)\n \"\"\"\n batch_size = input.shape[0]\n output = input.view(batch_size, self.input_size, 1)\n output_old = torch.zeros_like(output)\n step = 0\n\n while step < self.steps:\n if not self.diff_in_out_dim and (output == output_old).all():\n break # This condition is only for square HEBB\n\n output_old = output\n output = torch.matmul(self.hebb, output_old)\n if self.take_sign_in_output:\n output = torch.sign(output)\n step += 1\n return output.squeeze(dim=2)\n\n\nclass ClassicHopfield(nn.Module):\n def __init__(self, input_size, output_size=None, config=None,\n recalc_output_in_forward=False):\n super().__init__()\n\n self.layer = ClassicHopfieldLayer(input_size=input_size,\n output_size=output_size, **config)\n self.recalc_output_in_forward = recalc_output_in_forward\n\n\n def forward(self, input, modu_input=None, rnn_state=None, target=None):\n \"\"\"\n Args:\n input: tensor (seq_len, batch_size, input_size)\n modu_input: tensor (seq_len, batch_size, modu_input_size)\n Modulatory input.\n \"\"\"\n seq_len, batch_size, _ = input.shape\n\n output = []\n steps = range(input.size(0))\n for i in steps:\n out = self.layer(input[i])\n if target is not None:\n post = target[i]\n else:\n post = input[i]\n self.layer.update_hebb(pre=input[i],\n 
post=post,\n third_factor=modu_input[i])\n if self.recalc_output_in_forward:\n out = self.layer(input[i])\n output.append(out)\n\n # output (seq_len, batch_size, input_size)\n output = torch.stack(output, 0)\n return output, None\n\n def reset_weights(self, batch):\n nn.init.zeros_(self.layer.hebb[batch])\n\n" ]
[ [ "torch.zeros", "torch.sign", "torch.zeros_like", "torch.tensor", "torch.matmul", "torch.nn.init.zeros_", "torch.stack", "torch.clamp" ] ]
niliur/magenta
[ "8e5da380a1cd39d14c5bcbbae0691e7983f833fa" ]
[ "magenta/models/gansynth/lib/train_util.py" ]
[ "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Train a progressive GAN model.\n\nSee https://arxiv.org/abs/1710.10196 for details about the model.\n\nSee https://github.com/tkarras/progressive_growing_of_gans for the original\ntheano implementation.\n\"\"\"\n\nimport os\nimport time\n\nfrom absl import logging\nfrom magenta.models.gansynth.lib import networks\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow_gan as tfgan\n\n\ndef make_train_sub_dir(stage_id, **kwargs):\n \"\"\"Returns the log directory for training stage `stage_id`.\"\"\"\n return os.path.join(kwargs['train_root_dir'], 'stage_{:05d}'.format(stage_id))\n\n\ndef make_resolution_schedule(**kwargs):\n \"\"\"Returns an object of `ResolutionSchedule`.\"\"\"\n return networks.ResolutionSchedule(\n scale_mode=kwargs['scale_mode'],\n start_resolutions=(kwargs['start_height'], kwargs['start_width']),\n scale_base=kwargs['scale_base'],\n num_resolutions=kwargs['num_resolutions'])\n\n\ndef get_stage_ids(**kwargs):\n \"\"\"Returns a list of stage ids.\n\n Args:\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'num_resolutions': An integer of number of progressive resolutions.\n \"\"\"\n train_sub_dirs = [\n sub_dir for sub_dir in tf.gfile.ListDirectory(kwargs['train_root_dir'])\n if sub_dir.startswith('stage_')\n ]\n\n # If fresh start, start with start_stage_id = 0\n # If has been trained for n = len(train_sub_dirs) stages, start with the last\n # stage, i.e. start_stage_id = n - 1.\n start_stage_id = max(0, len(train_sub_dirs) - 1)\n\n return list(range(start_stage_id, get_total_num_stages(**kwargs)))\n\n\ndef get_total_num_stages(**kwargs):\n \"\"\"Returns total number of training stages.\"\"\"\n return 2 * kwargs['num_resolutions'] - 1\n\n\ndef get_batch_size(stage_id, **kwargs):\n \"\"\"Returns batch size for each stage.\n\n It is expected that `len(batch_size_schedule) == num_resolutions`. Each stage\n corresponds to a resolution and hence a batch size. 
However if\n `len(batch_size_schedule) < num_resolutions`, pad `batch_size_schedule` in the\n beginning with the first batch size.\n\n Args:\n stage_id: An integer of training stage index.\n **kwargs: A dictionary of\n 'batch_size_schedule': A list of integer, each element is the batch size\n for the current training image resolution.\n 'num_resolutions': An integer of number of progressive resolutions.\n\n Returns:\n An integer batch size for the `stage_id`.\n \"\"\"\n batch_size_schedule = kwargs['batch_size_schedule']\n num_resolutions = kwargs['num_resolutions']\n if len(batch_size_schedule) < num_resolutions:\n batch_size_schedule = (\n [batch_size_schedule[0]] * (num_resolutions - len(batch_size_schedule))\n + batch_size_schedule)\n\n return int(batch_size_schedule[(stage_id + 1) // 2])\n\n\ndef get_stage_info(stage_id, **kwargs):\n \"\"\"Returns information for a training stage.\n\n Args:\n stage_id: An integer of training stage index.\n **kwargs: A dictionary of\n 'num_resolutions': An integer of number of progressive resolutions.\n 'stable_stage_num_images': An integer of number of training images in\n the stable stage.\n 'transition_stage_num_images': An integer of number of training images\n in the transition stage.\n 'total_num_images': An integer of total number of training images.\n\n Returns:\n A tuple of integers. The first entry is the number of blocks. The second\n entry is the accumulated total number of training images when stage\n `stage_id` is finished.\n\n Raises:\n ValueError: If `stage_id` is not in [0, total number of stages).\n \"\"\"\n total_num_stages = get_total_num_stages(**kwargs)\n valid_stage_id = (0 <= stage_id < total_num_stages)\n if not valid_stage_id:\n raise ValueError(\n '`stage_id` must be in [0, {0}), but instead was {1}'.format(\n total_num_stages, stage_id))\n\n # Even stage_id: stable training stage.\n # Odd stage_id: transition training stage.\n num_blocks = (stage_id + 1) // 2 + 1\n num_images = ((stage_id // 2 + 1) * kwargs['stable_stage_num_images'] + (\n (stage_id + 1) // 2) * kwargs['transition_stage_num_images'])\n\n total_num_images = kwargs['total_num_images']\n if stage_id >= total_num_stages - 1:\n num_images = total_num_images\n num_images = min(num_images, total_num_images)\n\n return num_blocks, num_images\n\n\ndef make_latent_vectors(num, **kwargs):\n \"\"\"Returns a batch of `num` random latent vectors.\"\"\"\n return tf.random_normal([num, kwargs['latent_vector_size']], dtype=tf.float32)\n\n\ndef make_interpolated_latent_vectors(num_rows, num_columns, **kwargs):\n \"\"\"Returns a batch of linearly interpolated latent vectors.\n\n Given two randomly generated latent vector za and zb, it can generate\n a row of `num_columns` interpolated latent vectors, i.e.\n [..., za + (zb - za) * i / (num_columns - 1), ...] where\n i = 0, 1, ..., `num_columns` - 1.\n\n This function produces `num_rows` such rows and returns a (flattened)\n batch of latent vectors with batch size `num_rows * num_columns`.\n\n Args:\n num_rows: An integer. Number of rows of interpolated latent vectors.\n num_columns: An integer. 
Number of interpolated latent vectors in each row.\n **kwargs: A dictionary of\n 'latent_vector_size': An integer of latent vector size.\n\n Returns:\n A `Tensor` of shape `[num_rows * num_columns, latent_vector_size]`.\n \"\"\"\n ans = []\n for _ in range(num_rows):\n z = tf.random_normal([2, kwargs['latent_vector_size']])\n r = tf.reshape(\n tf.to_float(tf.range(num_columns)) / (num_columns - 1), [-1, 1])\n dz = z[1] - z[0]\n ans.append(z[0] + tf.stack([dz] * num_columns) * r)\n return tf.concat(ans, axis=0)\n\n\ndef define_loss(gan_model, **kwargs):\n \"\"\"Defines progressive GAN losses.\n\n The generator and discriminator both use wasserstein loss. In addition,\n a small penalty term is added to the discriminator loss to prevent it getting\n too large.\n\n Args:\n gan_model: A `GANModel` namedtuple.\n **kwargs: A dictionary of\n 'gradient_penalty_weight': A float of gradient penalty weight for\n wasserstein loss.\n 'gradient_penalty_target': A float of gradient norm target for\n wasserstein loss.\n 'real_score_penalty_weight': A float of additional penalty to keep\n the scores from drifting too far from zero.\n\n Returns:\n A `GANLoss` namedtuple.\n \"\"\"\n gan_loss = tfgan.gan_loss(\n gan_model,\n generator_loss_fn=tfgan.losses.wasserstein_generator_loss,\n discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,\n gradient_penalty_weight=kwargs['gradient_penalty_weight'],\n gradient_penalty_target=kwargs['gradient_penalty_target'],\n gradient_penalty_epsilon=0.0)\n\n real_score_penalty = tf.reduce_mean(\n tf.square(gan_model.discriminator_real_outputs))\n tf.summary.scalar('real_score_penalty', real_score_penalty)\n\n return gan_loss._replace(\n discriminator_loss=(\n gan_loss.discriminator_loss +\n kwargs['real_score_penalty_weight'] * real_score_penalty))\n\n\ndef define_train_ops(gan_model, gan_loss, **kwargs):\n \"\"\"Defines progressive GAN train ops.\n\n Args:\n gan_model: A `GANModel` namedtuple.\n gan_loss: A `GANLoss` namedtuple.\n **kwargs: A dictionary of\n 'adam_beta1': A float of Adam optimizer beta1.\n 'adam_beta2': A float of Adam optimizer beta2.\n 'generator_learning_rate': A float of generator learning rate.\n 'discriminator_learning_rate': A float of discriminator learning rate.\n\n Returns:\n A tuple of `GANTrainOps` namedtuple and a list of variables tracking the state\n of optimizers.\n \"\"\"\n with tf.variable_scope('progressive_gan_train_ops') as var_scope:\n beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']\n gen_opt = tf.train.AdamOptimizer(kwargs['generator_learning_rate'], beta1,\n beta2)\n dis_opt = tf.train.AdamOptimizer(kwargs['discriminator_learning_rate'],\n beta1, beta2)\n gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)\n return gan_train_ops, tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)\n\n\ndef add_generator_smoothing_ops(generator_ema, gan_model, gan_train_ops):\n \"\"\"Adds generator smoothing ops.\"\"\"\n with tf.control_dependencies([gan_train_ops.generator_train_op]):\n new_generator_train_op = generator_ema.apply(gan_model.generator_variables)\n\n gan_train_ops = gan_train_ops._replace(\n generator_train_op=new_generator_train_op)\n generator_vars_to_restore = generator_ema.variables_to_restore(\n gan_model.generator_variables)\n return gan_train_ops, generator_vars_to_restore\n\n\ndef make_var_scope_custom_getter_for_ema(ema):\n \"\"\"Makes variable scope custom getter.\"\"\"\n\n def _custom_getter(getter, name, *args, **kwargs):\n var = getter(name, *args, **kwargs)\n
ema_var = ema.average(var)\n return ema_var if ema_var else var\n\n return _custom_getter\n\n\ndef add_model_summaries(model, **kwargs):\n \"\"\"Adds model summaries.\n\n This function adds several useful summaries during training:\n - fake_images: A grid of fake images based on random latent vectors.\n - interp_images: A grid of fake images based on interpolated latent vectors.\n - real_images_blend: A grid of real images.\n - summaries for `gan_model` losses, variable distributions etc.\n\n Args:\n model: An model object having all information of progressive GAN model,\n e.g. the return of build_model().\n **kwargs: A dictionary of\n 'fake_grid_size': The fake image grid size for summaries.\n 'interp_grid_size': The latent space interpolated image grid size for\n summaries.\n 'colors': Number of image channels.\n 'latent_vector_size': An integer of latent vector size.\n \"\"\"\n fake_grid_size = kwargs['fake_grid_size']\n interp_grid_size = kwargs['interp_grid_size']\n colors = kwargs['colors']\n\n image_shape = list(model.resolution_schedule.final_resolutions)\n\n fake_batch_size = fake_grid_size**2\n fake_images_shape = [fake_batch_size] + image_shape + [colors]\n\n interp_batch_size = interp_grid_size**2\n interp_images_shape = [interp_batch_size] + image_shape + [colors]\n\n # When making prediction, use the ema smoothed generator vars.\n with tf.variable_scope(\n model.gan_model.generator_scope,\n reuse=True,\n custom_getter=make_var_scope_custom_getter_for_ema(model.generator_ema)):\n z_fake = make_latent_vectors(fake_batch_size, **kwargs)\n fake_images = model.gan_model.generator_fn(z_fake)\n fake_images.set_shape(fake_images_shape)\n\n z_interp = make_interpolated_latent_vectors(interp_grid_size,\n interp_grid_size, **kwargs)\n interp_images = model.gan_model.generator_fn(z_interp)\n interp_images.set_shape(interp_images_shape)\n\n tf.summary.image(\n 'fake_images',\n tfgan.eval.eval_utils.image_grid(\n fake_images,\n grid_shape=[fake_grid_size] * 2,\n image_shape=image_shape,\n num_channels=colors),\n max_outputs=1)\n\n tf.summary.image(\n 'interp_images',\n tfgan.eval.eval_utils.image_grid(\n interp_images,\n grid_shape=[interp_grid_size] * 2,\n image_shape=image_shape,\n num_channels=colors),\n max_outputs=1)\n\n real_grid_size = int(np.sqrt(model.batch_size))\n tf.summary.image(\n 'real_images_blend',\n tfgan.eval.eval_utils.image_grid(\n model.gan_model.real_data[:real_grid_size**2],\n grid_shape=(real_grid_size, real_grid_size),\n image_shape=image_shape,\n num_channels=colors),\n max_outputs=1)\n\n tfgan.eval.add_gan_model_summaries(model.gan_model)\n\n\ndef make_scaffold(stage_id, optimizer_var_list, **kwargs):\n \"\"\"Makes a custom scaffold.\n\n The scaffold\n - restores variables from the last training stage.\n - initializes new variables in the new block.\n\n Args:\n stage_id: An integer of stage id.\n optimizer_var_list: A list of optimizer variables.\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'num_resolutions': An integer of number of progressive resolutions.\n 'stable_stage_num_images': An integer of number of training images in\n the stable stage.\n 'transition_stage_num_images': An integer of number of training images\n in the transition stage.\n 'total_num_images': An integer of total number of training images.\n\n Returns:\n A `Scaffold` object.\n \"\"\"\n # Holds variables that from the previous stage and need to be restored.\n restore_var_list = []\n prev_ckpt = None\n curr_ckpt = 
tf.train.latest_checkpoint(make_train_sub_dir(stage_id, **kwargs))\n if stage_id > 0 and curr_ckpt is None:\n prev_ckpt = tf.train.latest_checkpoint(\n make_train_sub_dir(stage_id - 1, **kwargs))\n\n num_blocks, _ = get_stage_info(stage_id, **kwargs)\n prev_num_blocks, _ = get_stage_info(stage_id - 1, **kwargs)\n\n # Holds variables created in the new block of the current stage. If the\n # current stage is a stable stage (except the initial stage), this list\n # will be empty.\n new_block_var_list = []\n for block_id in range(prev_num_blocks + 1, num_blocks + 1):\n new_block_var_list.extend(\n tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope='.*/{}/'.format(networks.block_name(block_id))))\n\n # Every variables that are 1) not for optimizers and 2) from the new block\n # need to be restored.\n restore_var_list = [\n var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n if var not in set(optimizer_var_list + new_block_var_list)\n ]\n\n # Add saver op to graph. This saver is used to restore variables from the\n # previous stage.\n saver_for_restore = tf.train.Saver(\n var_list=restore_var_list, allow_empty=True)\n # Add the op to graph that initializes all global variables.\n init_op = tf.global_variables_initializer()\n\n def _init_fn(unused_scaffold, sess):\n # First initialize every variables.\n sess.run(init_op)\n logging.info('\\n'.join([var.name for var in restore_var_list]))\n # Then overwrite variables saved in previous stage.\n if prev_ckpt is not None:\n saver_for_restore.restore(sess, prev_ckpt)\n\n # Use a dummy init_op here as all initialization is done in init_fn.\n return tf.train.Scaffold(init_op=tf.constant([]), init_fn=_init_fn)\n\n\ndef make_status_message(model):\n \"\"\"Makes a string `Tensor` of training status.\"\"\"\n return tf.string_join(\n [\n 'Starting train step: current_image_id: ',\n tf.as_string(model.current_image_id), ', progress: ',\n tf.as_string(model.progress), ', num_blocks: {}'.format(\n model.num_blocks), ', batch_size: {}'.format(model.batch_size)\n ],\n name='status_message')\n\n\nclass ProganDebugHook(tf.train.SessionRunHook):\n \"\"\"Prints summary statistics of all tf variables.\"\"\"\n\n def __init__(self):\n super(ProganDebugHook, self).__init__()\n self._fetches = [v for v in tf.global_variables()]\n\n def before_run(self, _):\n return tf.train.SessionRunArgs(self._fetches)\n\n def after_run(self, _, vals):\n print('=============')\n print('Weight stats:')\n for v, r in zip(self._fetches, vals.results):\n print('\\t', v.name, np.min(r), np.mean(r), np.max(r), r.shape)\n print('=============')\n\n\nclass TrainTimeHook(tf.train.SessionRunHook):\n \"\"\"Updates the train_time variable.\n\n Optionally stops training if we've passed a time limit.\n \"\"\"\n _last_run_start_time = Ellipsis # type: float\n\n def __init__(self, train_time, time_limit=None):\n super(TrainTimeHook, self).__init__()\n self._train_time = train_time\n self._time_limit = time_limit\n self._increment_amount = tf.placeholder(tf.float32, None)\n self._increment_op = tf.assign_add(train_time, self._increment_amount)\n self._last_run_duration = None\n\n def before_run(self, _):\n self._last_run_start_time = time.time()\n if self._last_run_duration is not None:\n return tf.train.SessionRunArgs(\n [self._train_time, self._increment_op],\n feed_dict={self._increment_amount: self._last_run_duration})\n else:\n return tf.train.SessionRunArgs([self._train_time])\n\n def after_run(self, run_context, vals):\n self._last_run_duration = time.time() - 
self._last_run_start_time\n train_time = vals.results[0]\n if (self._time_limit is not None) and (train_time > self._time_limit):\n run_context.request_stop()\n\n\ndef train(model, **kwargs):\n \"\"\"Trains progressive GAN for stage `stage_id`.\n\n Args:\n model: An model object having all information of progressive GAN model,\n e.g. the return of build_model().\n **kwargs: A dictionary of\n 'train_root_dir': A string of root directory of training logs.\n 'master': Name of the TensorFlow master to use.\n 'task': The Task ID. This value is used when training with multiple\n workers to identify each worker.\n 'save_summaries_num_images': Save summaries in this number of images.\n 'debug_hook': Whether to attach the debug hook to the training session.\n Returns:\n None.\n \"\"\"\n logging.info('stage_id=%d, num_blocks=%d, num_images=%d', model.stage_id,\n model.num_blocks, model.num_images)\n\n scaffold = make_scaffold(model.stage_id, model.optimizer_var_list, **kwargs)\n\n logdir = make_train_sub_dir(model.stage_id, **kwargs)\n print('starting training, logdir: {}'.format(logdir))\n hooks = []\n if model.stage_train_time_limit is None:\n hooks.append(tf.train.StopAtStepHook(last_step=model.num_images))\n hooks.append(tf.train.LoggingTensorHook(\n [make_status_message(model)], every_n_iter=1))\n hooks.append(TrainTimeHook(model.train_time, model.stage_train_time_limit))\n if kwargs['debug_hook']:\n hooks.append(ProganDebugHook())\n tfgan.gan_train(\n model.gan_train_ops,\n logdir=logdir,\n get_hooks_fn=tfgan.get_sequential_train_hooks(tfgan.GANTrainSteps(1, 1)),\n hooks=hooks,\n master=kwargs['master'],\n is_chief=(kwargs['task'] == 0),\n scaffold=scaffold,\n save_checkpoint_secs=600,\n save_summaries_steps=(kwargs['save_summaries_num_images']))\n" ]
[ [ "numpy.sqrt", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.random_normal", "numpy.max", "numpy.mean", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.gfile.ListDirectory", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.square", "numpy.min", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.as_string", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.train.SessionRunArgs", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.assign_add", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.range", "tensorflow.compat.v1.train.StopAtStepHook" ] ]
asabyr/LensTools
[ "e155d6d39361e550906cec00dbbc57686a4bca5c" ]
[ "lenstools/statistics/constraints.py" ]
[ "\"\"\"\n\n.. module:: constraints\n\t:platform: Unix\n\t:synopsis: This module implements the usual statistical tools you need to calculate cosmological parameters confidence intervals\n\n\n.. moduleauthor:: Andrea Petri <[email protected]>\n\n\n\"\"\"\n\nfrom __future__ import division,print_function,with_statement\n\nimport sys\nfrom operator import mul\nfrom functools import reduce\n\nif sys.version_info.major>=3:\n\timport _pickle as pickle\nelse:\n\timport cPickle as pickle\n\n#########################################################\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import stats,interpolate\n\nfrom emcee.ensemble import _function_wrapper\n\ntry:\n\tfrom matplotlib.patches import Ellipse\nexcept ImportError:\n\tEllipse = None\n\n#########################################################\n\nfrom ..utils.algorithms import precision_bias_correction\nfrom .ensemble import Series,Ensemble,Panel \nfrom . import samplers\n\n#########################################################\n#############Default Gaussian data likelihood############\n#########################################################\n\ndef gaussian_likelihood(chi2,norm=1.0):\n\treturn norm*np.exp(-0.5*chi2)\n\n######################################################################\n##########Default chi2 calculation with the sandwich product##########\n######################################################################\n\ndef chi2(parameters,*args,**kwargs):\n\n\tmodel_feature = _predict(parameters,kwargs[\"interpolator\"])\n\tinverse_covariance = kwargs[\"inverse_covariance\"]\n\n\tif model_feature.ndim==1:\n\t\tobserved_feature = kwargs[\"observed_feature\"]\n\telse: \n\t\tobserved_feature = kwargs[\"observed_feature\"][None,:]\n\n\tinverse_covariance_dot = np.dot(observed_feature - model_feature,inverse_covariance)\n\n\treturn ((observed_feature - model_feature) * inverse_covariance_dot).sum(-1)\n\t\n\n#######################################################################\n#############Feature prediction wrapper################################\n#######################################################################\n\n#Fast interpolation method\ndef _interpolate_fast(p,parameter_grid,method,weights,epsilon):\n\treturn method(((parameter_grid[None]-p[:,None])**2).sum(-1),epsilon).dot(weights)\n\ndef _predict(parameters,interpolator):\n\n\t#Cast to higher dimension\n\tparameters = np.atleast_2d(parameters)\n\n\tif isinstance(interpolator,list):\n\t\t#For each feature bin, compute its interpolated value\n\t\tinterpolated_feature = np.zeros((parameters.shape[0],len(interpolator)))\n\n\t\tfor n,i in enumerate(interpolator):\n\t\t\tinterpolated_feature[:,n] = i()(*parameters.T)\n\n\telse:\n\t\t#Compute fast interpolation\n\t\tinterpolated_feature = interpolator(parameters)\n\n\treturn np.squeeze(interpolated_feature)\n\n\n##############################################\n###########Analysis base class################\n##############################################\n\nclass Analysis(Ensemble):\n\t\n\t\"\"\"\n\tThe base class of this module; the idea in weak lensing analysis is that one has a set of simulated data, that serves as training model, and then uses that set to fit the observations for the best model parameters. 
Inherits from :py:class:`Ensemble`\n\n\t\"\"\"\n\n\t_analysis_type = None\n\n\tdef _check_valid(self):\n\t\tassert \"parameters\" in self.columns.levels[0],\"There are no parameters specified for this analysis!\"\n\n\t@classmethod\n\tdef from_features(cls,features,parameters=None,feature_index=None,parameter_index=None):\n\n\t\t#If features and parameters are already in DataFrame instances then just append them\n\t\tif isinstance(features,pd.DataFrame) and isinstance(parameters,pd.DataFrame):\n\t\t\tassert len(parameters.columns.levels[0])==1 and parameters.columns.levels[0][0]==\"parameters\"\n\t\t\treturn cls.concat((parameters,features),axis=1)\n\n\t\t#Cast shapes correctly\n\t\tif len(features.shape)==1:\n\t\t\tfeatures = features[None]\n\n\t\tif parameters is None:\n\t\t\tparameters = np.arange(len(features))[None]\n\n\t\tif len(parameters.shape)==1:\n\t\t\tparameters = parameters[None]\n\n\t\t#Make the indices\n\t\tif parameter_index is None:\n\t\t\tparameter_index = Series.make_index(pd.Index(range(parameters.shape[1]),name=\"parameters\"))\n\t\telif isinstance(parameter_index,list):\n\t\t\tparameter_index = Series.make_index(pd.Index(parameter_index,name=\"parameters\"))\n\n\t\tif feature_index is None:\n\t\t\tfeature_index = Series.make_index(pd.Index(range(features.shape[1]),name=\"features\"))\n\t\telif isinstance(feature_index,list):\n\t\t\tfeature_index = Series.make_index(pd.Index(feature_index,name=\"features\"))\n\n\t\t#Instantiate the parameter and feature part of the analysis\n\t\tanalysis_param = cls(parameters,columns=parameter_index)\n\t\tanalysis_features = cls(features,columns=feature_index)\n\n\t\t#Instantiate Analysis\n\t\treturn cls.concat((analysis_param,analysis_features),axis=1)\n\n\n\t##################\n\t####Properties####\n\t##################\n\n\t@property\n\tdef feature_names(self):\n\t\tall_names = list(self.columns.levels[0])\n\t\tall_names.remove(\"parameters\")\n\t\treturn all_names\n\n\t@property\n\tdef parameter_names(self):\n\t\treturn list(self[\"parameters\"].columns)\n\n\t@property\n\tdef parameter_set(self):\n\t\treturn self[\"parameters\"].values\n\n\t@property\n\tdef feature_set(self):\n\t\treturn self[self.feature_names].values\n\n\tdef parameters(self,names=None):\n\n\t\tif names is None:\n\t\t\treturn self\n\n\t\tparameter_names = self.parameter_names\n\t\tif isinstance(names,str):\n\t\t\tnames = [names]\n\n\t\tsubset = self.copy()\n\t\texclude_names = filter(lambda n:not n in names,parameter_names)\n\n\t\tfor n in exclude_names:\n\t\t\tsubset.pop((\"parameters\",n))\n\n\t\treturn subset\n\t\t\n\n\tdef features(self,names=None):\n\n\t\tif names is None:\n\t\t\treturn self\n\t\telif isinstance(names,str):\n\t\t\treturn self[[\"parameters\"]+[names]].copy()\n\t\telif isinstance(names,list):\n\t\t\treturn self[[\"parameters\"]+names].copy()\n\t\telif isinstance(names,dict):\n\n\t\t\tpieces = [self[[\"parameters\"]]]\n\t\t\tfor key in names.keys():\n\t\t\t\tpiece = self[key][names[key]]\n\t\t\t\tpiece.add_name(key)\n\t\t\t\tpieces.append(piece)\n\n\t\t\treturn self.__class__.concat(pieces,axis=1)\n\n\t\telse:\n\t\t\traise TypeError(\"names type not supported!\")\n\n\n\t##################\n\t####Operations####\n\t##################\n\n\tdef add_models(self,parameters,feature):\n\n\t\t\"\"\"\n\t\tAdd a model to the training set of the current analysis\n\n\t\t:param parameters: parameter set of the new model\n\t\t:type parameters: array\n\n\t\t:param feature: measured feature of the new model\n\t\t:type feature: array\n\n\t\t\"\"\"\n\n\t\t#Cast 
dimensions\n\t\tif len(parameters.shape)==1:\n\t\t\tparameters = parameters[None]\n\n\t\tif len(feature.shape)==1:\n\t\t\tfeature = feature[None]\n\n\t\t#Check for input valudity\n\t\tassert len(parameters)==len(feature)\n\t\tassert parameters.shape[1] == self.parameter_set.shape[1]\n\t\tassert feature.shape[1:] == self.feature_set.shape[1:]\n\n\t\t#hstack\n\t\tparameters_and_features = np.hstack((parameters,feature))\n\n\t\t#Return the newly created Analysis\n\t\treturn self.append(self._constructor(parameters_and_features,columns=self.columns),ignore_index=True)\n\n\n\tdef reparametrize(self,transformation,**kwargs):\n\n\t\t\"\"\"\n\t\tReparametrize the parameter set of the analysis by calling the formatter handle on the current parameter set (can be used to enlarge/shrink/relabel the parameter set)\n\n\t\t:param transformation: transformation function called on the parameters, must take in a row of parameters and return another row of parameters\n\t\t:type transformation: callable\n\n\t\t:param kwargs: the keyword arguments are passed to the transformation callable\n\t\t:type kwargs: dict.\n\n\t\t:returns: reparametrized Analysis\n\n\t\t\"\"\"\n\n\t\t#Apply the transformation\n\t\tnew_parameters = self[\"parameters\"].apply(transformation,axis=1,**kwargs)\n\t\tnew_parameters.columns.name = \"parameters\"\n\t\tnew_parameters.columns = Series.make_index(new_parameters.columns)\n\n\t\t#Return the reparametrized analysis\n\t\treparametrized_analysis = self.copy()\n\t\treparametrized_analysis.pop(\"parameters\")\n\t\treturn self.__class__.concat((new_parameters,reparametrized_analysis),axis=1)\n\n\n\tdef refeaturize(self,transformation,method=\"apply_row\",**kwargs):\n\n\t\t\"\"\"\n\t\tAllows a general transformation on the feature set of the analysis by calling an arbitrary transformation function\n\n\t\t:param transformation: callback function called on the feature_set; must take in a row of features and return a row of features. 
If a dictionary is passed, the keys must be the feature names\n\t\t:type transformation: callable or dict.\n\n\t\t:param kwargs: the keyword arguments are passed to the transformation callable\n\t\t:type kwargs: dict.\n\n\t\t:returns: transformed Analysis\n\n\t\t\"\"\"\n\n\t\t#Build transformation dictionary\n\t\tif isinstance(transformation,dict):\n\t\t\ttransformation_dict = dict((n,lambda x:x) for n in self.feature_names)\n\t\t\tfor n in transformation.keys():\n\t\t\t\ttransformation_dict[n] = transformation[n]\n\t\telse:\n\t\t\ttransformation_dict = dict((n,transformation) for n in self.feature_names)\n\n\t\t#Apply the transformations to each feature\n\t\ttransformed_features = list()\n\t\tfor n in self.feature_names:\n\t\t\t\n\t\t\tif method==\"apply_row\":\n\t\t\t\ttransformed_feature = self[[n]].apply(transformation_dict[n],axis=1,**kwargs)\n\t\t\telif method==\"apply_whole\":\n\t\t\t\ttransformed_feature = transformation_dict[n](self[[n]],**kwargs)\n\t\t\t\ttransformed_feature.add_name(n)\n\t\t\t\ttransformed_feature.index = self.index\n\t\t\telif method==\"dot\":\n\t\t\t\ttransformed_feature = self[[n]].dot(transformation_dict[n])\n\t\t\t\ttransformed_feature.add_name(n)\n\t\t\t\ttransformed_feature.index = self.index \n\t\t\telse:\n\t\t\t\traise NotImplementedError(\"transformation method {0} not implemented!\".format(method))\n\n\t\t\ttransformed_features.append(transformed_feature)\n\n\t\t#Concatenate and return\n\t\treturn self.__class__.concat([self[[\"parameters\"]]]+transformed_features,axis=1)\n\n\n\tdef combine_features(self,combinations):\n\n\t\t\"\"\"\n\t\tCombine features in the Analysis, according to a dictionary whose keys are the names of the combined features\n\n\t\t:param combinations: mapping of combined features onto the old ones \n\t\t:type combinations: dict.\n\n\t\t\"\"\"\n\n\t\tcombined_features = list()\n\n\t\t#Cycle over the combinations keys\n\t\tfor n in combinations.keys():\n\n\t\t\t#Select\n\t\t\tcombined_feature = self[combinations[n]].copy()\n\t\t\t\n\t\t\t#Merge the column names\n\t\t\tcombined_feature_index = pd.Index(np.hstack([ combined_feature[c].columns.values for c in combinations[n] ]),name=n)\n\t\t\tcombined_feature_index = Series.make_index(combined_feature_index)\n\t\t\tcombined_feature.columns = combined_feature_index\n\n\t\t\t#Append to the combination list\n\t\t\tcombined_features.append(combined_feature)\n\n\t\t#Concatenate everything\n\t\treturn self.__class__.concat([self[[\"parameters\"]]]+combined_features,axis=1)\n\t\n\n\t###############################################################################################################################\n\n\n\tdef find(self,parameters,rtol=1.0e-05):\n\n\t\t\"\"\"\n\t\tFinds the location in the instance that has the specified combination of parameters\n\n\t\t:param parameters: the parameters of the model to find\n\t\t:type parameters: array.\n\n\t\t:param rtol: tolerance of the search (must be less than 1)\n\t\t:type rtol: float.\n\n\t\t:returns: array of int. 
with the indices of the corresponding models\n\n\t\t\"\"\"\n\n\t\tassert len(parameters)==self.parameter_set.shape[1]\n\n\t\tsearch_result = np.all(np.isclose(self.parameter_set,parameters,rtol=rtol),axis=1)\n\t\treturn np.where(search_result==True)[0]\n\n\t###############################################################################################################################\n\n\t@staticmethod\n\tdef ellipse(center,covariance,p_value=0.684,**kwargs):\n\n\t\t\"\"\"\n\n\t\tDraws a confidence ellipse using matplotlib Ellipse patch\n\n\t\t:param center: center of the ellipse\n\t\t:type center: tuple.\n\n\t\t:param covariance: parameters covariance matrix\n\t\t:type covariance: 2D-array.\n\n\t\t:param p_value: p-value to calculate\n\t\t:type p_value: float.\n\t\t\t\t\t\t\n\t\t:param kwargs: the keyword arguments are passed to the matplotlib Ellipse method\n\t\t:type kwargs: dict.\n\n\t\t:returns: matplotlib ellipse object\n\t\t:rtype: Ellipse\n\n\t\t\"\"\"\n\n\t\t#Check that ellipse patch is available\n\t\tif Ellipse is None:\n\t\t\traise ImportError(\"The matplotlib Ellipse patch is necessary to use this method!\")\n\n\t\t#Compute the directions and sizes of the ellipse axes\n\t\tw,v = np.linalg.eigh(covariance)\n\t\twidth,height = 2*np.sqrt(w * stats.chi2(2).ppf(p_value))\n\n\t\ttry:\n\t\t\tangle = 180.*np.arctan(v[1,0] / v[0,0]) / np.pi\n\t\texcept ZeroDivisionError:\n\t\t\tangle = 90.\n\n\t\t#Draw the ellipse\n\t\treturn Ellipse(center,width,height,angle=angle,**kwargs)\n\n\n###################################################\n#############Fisher matrix analysis################\n###################################################\n\nclass FisherSeries(Series):\n\n\t@property\n\tdef _constructor_expanddim(self):\n\t\treturn FisherAnalysis\n\nclass FisherAnalysis(Analysis):\n\n\t################################################################\n\t##############DataFrame subclassing#############################\n\t################################################################\n\n\t@property \n\tdef _constructor_sliced(self):\n\t\treturn FisherSeries\n\n\t@property\n\tdef _constructor_expanddim(self):\n\t\treturn FisherPanel\n\n\t#################################################################\n\n\t_analysis_type = \"Fisher\"\n\t_fiducial = 0\n\n\t\"\"\"\n\tThe class handler of a Fisher matrix analysis, inherits from the base class Analysis\n\n\t\"\"\"\n\n\tdef set_fiducial(self,n):\n\n\t\t\"\"\"\n\t\tSets the fiducial model (with respect to which to compute the derivatives), default is 0 (i.e. 
self.parameter_set[0])\n\n\t\t:param n: the parameter set you want to use as fiducial\n\t\t:type n: int.\n\n\t\t\"\"\"\n\n\t\tassert n < self.parameter_set.shape[0],\"There are less than {0} models in your analysis\".format(n+1)\n\n\t\tself._fiducial = n\n\n\t@property\n\tdef fiducial(self):\n\n\t\treturn self.feature_set[self._fiducial]\n\n\t@property\n\tdef _variations(self):\n\n\t\t\"\"\"\n\t\tChecks the parameter variations with respect to the fiducial cosmology\n\n\t\t:returns: bool array (True if the parameter is varied, False otherwise)\n\n\t\t\"\"\"\n\n\t\treturn self.parameter_set!=self.parameter_set[self._fiducial]\n\n\t@property\n\tdef variations(self):\n\n\t\t\"\"\"\n\t\tChecks the parameter variations with respect to the fiducial cosmology\n\n\t\t:returns: iterable with the positions of the variations\n\n\t\t\"\"\"\n\n\t\tfor n,b in enumerate(self._variations.sum(1)):\n\t\t\tif b:\n\t\t\t\tyield n\n\n\n\tdef check(self):\n\n\t\t\"\"\"\n\t\tAsserts that the parameters are varied one at a time, and that a parameter is not varied more than once\n\n\t\t:raises: AssertionError\n\n\t\t\"\"\"\n\n\t\tassert (self._variations.sum(1)<2).all(),\"You can vary only a parameter at a time!\"\n\n\t\t#Check how many variations are there for each parameter\n\t\tnum_par_variations = self._variations.sum(0)\n\t\tif (num_par_variations<2).all():\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\n\tdef where(self,par=None):\n\n\t\t\"\"\"\n\t\tFinds the locations of the varied parameters in the parameter set\n\n\t\t:returns: dict. with the locations of the variations, for each parameter\n\n\t\t\"\"\"\n\n\t\tloc = dict()\n\t\tv = np.where(self._variations==1)\n\n\t\t#Decide if keys are lists or simple numbers\n\t\tif self.check():\n\n\t\t\tfor n in range(self.parameter_set.shape[1]):\n\t\t\t\tloc[n] = list()\n\n\t\t\tfor n in range(len(v[0])):\n\t\t\t\tloc[v[1][n]].append(v[0][n])\n\n\t\telse:\n\n\t\t\tfor n in range(len(v[0])):\n\t\t\t\tloc[v[1][n]] = v[0][n]\n\n\t\tif par is None:\n\t\t\treturn loc\n\t\telse:\n\t\t\treturn loc[par]\n\n\n\t@property\n\tdef varied(self):\n\n\t\t\"\"\"\n\t\tReturns the indices of the parameters that are varied \n\n\t\t:returns: list with the indices of the varied parameters\n\n\t\t\"\"\"\n\t\t\n\t\treturn list(sorted(self.where())) \n\n\n\tdef compute_derivatives(self):\n\n\t\t\"\"\"\n\t\tComputes the feature derivatives with respect to the parameter sets using one step finite differences; the derivatives are computed with respect to the fiducial parameter set\n\n\t\t:returns: array of shape (p,N), where N is the feature dimension and p is the number of varied parameters\n\n\t\t\"\"\"\n\n\t\tassert self.parameter_set.shape[0] > 1,\"You need at least 2 models to proceed in a Fisher Analysis!\"\n\t\tassert self.check()==0,\"Finite differences implemented only at first order! 
Cannot compute derivatives\"\n\n\t\t#Find the varied parameters and their locations\n\t\tloc_varied = self.where()\n\t\tpar_varied = list(sorted(loc_varied))\n\n\t\t#Allocate space for the derivatives\n\t\tderivatives = np.zeros((len(par_varied),)+self.feature_set.shape[1:])\n\n\t\t#Cycle over the varied parameters to calculate derivatives\n\t\tfor n,p in enumerate(par_varied):\n\t\t\t\n\t\t\t#Calculate the finite difference derivative with respect to this parameter\n\t\t\tderivatives[n] = (self.feature_set[loc_varied[p]] - self.feature_set[self._fiducial]) / (self.parameter_set[loc_varied[p],p] - self.parameter_set[self._fiducial,p])\n\n\t\t#set the derivatives attribute and return the result\n\t\tself._derivatives = self.__class__(derivatives,index=[self.parameter_names[n] for n in par_varied],columns=self[self.feature_names].columns)\n\n\t@property \n\tdef derivatives(self):\n\t\tif not hasattr(self,\"_derivatives\"):\n\t\t\tself.compute_derivatives()\t\n\t\treturn self._derivatives\n\n\n\tdef chi2(self,observed_feature,features_covariance,correct=None):\n\n\t\t\"\"\"\n\t\tComputes the chi2 between an observed feature and the fiducial feature, using the provided covariance\n\n\t\t:param observed_feature: observed feature to fit, its last dimension must have the same shape as self.feature_set[0] \n\t\t:type observed_feature: array\n\n\t\t:param features_covariance: covariance matrix of the simulated features, must be provided for a correct fit!\n\t\t:type features_covariance: 2 dimensional array (or 1 dimensional if diagonal)\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:returns: chi2 of the comparison\n\t\t:rtype: float.\n\n\t\t\"\"\"\n\n\t\t#Cast pandas types\n\t\tassert features_covariance is not None,\"No science without the covariance matrix, you must provide one!\"\n\t\t\n\t\tif isinstance(observed_feature,pd.Series) or isinstance(observed_feature,pd.DataFrame):\n\t\t\tobserved_feature = observed_feature.values\n\n\t\tif isinstance(features_covariance,pd.DataFrame):\n\t\t\tfeatures_covariance = features_covariance.values\n\n\t\t#Cast the observed feature in suitable shape\n\t\tif len(observed_feature.shape)==1:\n\t\t\tobserved_feature = observed_feature[None]\n\t\t\tsingle = True\n\t\telse:\n\t\t\tsingle = False\n\n\t\t#Check for correct shape of input\n\t\tassert observed_feature.shape[-1:]==self.feature_set.shape[-1:]\n\t\tassert features_covariance.shape in [self.feature_set.shape[-1:],self.feature_set.shape[-1:]*2]\n\n\t\t#Compute the difference\n\t\tdifference = observed_feature - self.fiducial[None]\n\n\t\t#Compute the chi2\n\t\tif features_covariance.shape==self.feature_set.shape[-1:]:\n\t\t\tresult = ((difference**2)/features_covariance[None]).sum(-1)\n\t\telse:\n\t\t\tif correct is not None:\n\t\t\t\tresult = (difference * np.linalg.solve(features_covariance/precision_bias_correction(correct,len(features_covariance)),difference.transpose()).transpose()).sum(-1)\n\t\t\telse:\n\t\t\t\tresult = (difference * np.linalg.solve(features_covariance,difference.transpose()).transpose()).sum(-1)\n\n\t\t#Return the result\n\t\tif single:\n\t\t\treturn result[0]\n\t\telse:\n\t\t\treturn result\n\n\t\n\tdef fit(self,observed_feature,features_covariance):\n\n\t\t\"\"\"\n\t\tMaximizes the gaussian likelihood on which the Fisher matrix formalism is based, and returns the best fit for the parameters given the observed feature\n\n\t\t:param observed_feature: 
observed feature to fit, must have the same shape as self.feature_set[0]\n\t\t:type observed_feature: array\n\n\t\t:param features_covariance: covariance matrix of the simulated features, must be provided for a correct fit!\n\t\t:type features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:returns: array with the best fitted parameter values\n\n\t\t\"\"\"\n\n\t\tassert features_covariance is not None,\"No science without the covariance matrix, you must provide one!\"\n\n\t\t#Cast pandas types\n\t\tif isinstance(observed_feature,pd.Series) or isinstance(observed_feature,pd.DataFrame):\n\t\t\tobserved_feature = observed_feature.values\n\n\t\tif isinstance(features_covariance,pd.DataFrame):\n\t\t\tfeatures_covariance = features_covariance.values\n\n\t\t#Check for correct shape of input\n\t\tassert (observed_feature.shape==self.feature_set.shape[1:]) or (observed_feature.shape[1:]==self.feature_set.shape[1:]) \n\t\tassert (features_covariance.shape==self.feature_set.shape[1:] * 2) or (features_covariance.shape==self.feature_set.shape[1:])\n\n\t\t#Linear algebra manipulations (parameters = M x features)\n\t\tif features_covariance.shape==self.feature_set.shape[1:] * 2:\n\t\t\tY = np.linalg.solve(features_covariance,self.derivatives.values.transpose())\n\t\telse:\n\t\t\tY = (1/features_covariance[:,np.newaxis]) * self.derivatives.values.transpose()\n\n\t\tXY = np.dot(self.derivatives.values,Y)\n\t\tM = np.linalg.solve(XY,Y.transpose())\n\n\t\t#Compute difference in parameters (with respect to the fiducial model)\n\t\tif observed_feature.ndim==1:\n\t\t\tdP = np.dot(M,observed_feature - self.feature_set[self._fiducial])\n\t\telse:\n\t\t\tdP = np.dot(observed_feature - self.feature_set[[self._fiducial]],M.T)\n\n\t\t#Return the actual best fit\n\t\tif dP.ndim==1:\n\t\t\treturn self._constructor_sliced(self.parameter_set[self._fiducial,self.varied] + dP,index=self.derivatives.index)\n\t\telse:\n\t\t\treturn self.__class__(self.parameter_set[self._fiducial,self.varied][None] + dP,columns=self.derivatives.index)\n\n\n\tdef classify(self,observed_feature,features_covariance,correct=None,labels=range(2),confusion=False):\n\n\t\t\"\"\"\n\t\tPerforms a Fisher classification of the observed feature, choosing the most probable label based on the value of the chi2\n\t\t\n\t\t:param observed_feature: observed feature to fit, the last dimension must have the same shape as self.feature_set[0]\n\t\t:type observed_feature: array\n\n\t\t:param features_covariance: covariance matrix of the simulated features, must be provided for a correct classification!\n\t\t:type features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:param labels: labels of the classification, must be the indices of the available classes (from 0 to feature_set.shape[0])\n\t\t:type labels: iterable\n\n\t\t:param confusion: if True, an array with the label percentage occurrences is returned; if False an array of labels is returned\n\t\t:type confusion: bool.\n\n\t\t:returns: array with the labels resulting from the classification\n\t\t:rtype: int.\n\n\t\t\"\"\"\n\n\t\tfiducial_original = self._fiducial\n\n\t\t#Compute all the chi squared values, for each observed feature and each label\n\t\tall_chi2 = list()\n\t\tfor l in 
labels:\n\t\t\tself.set_fiducial(l)\n\t\t\tall_chi2.append(self.chi2(observed_feature,features_covariance,correct=correct))\n\n\t\tself.set_fiducial(fiducial_original)\n\n\t\t#Cast the list into an array\n\t\tall_chi2 = np.array(all_chi2)\n\n\t\t#Find the minima\n\t\tchi2_min = all_chi2.argmin(0)\n\n\t\t#Translate into the corresponding classes\n\t\tclasses = np.zeros_like(chi2_min)\n\t\tfor n,l in enumerate(labels):\n\t\t\tclasses[chi2_min==n] = l\n\n\t\tif confusion:\n\n\t\t\t#Compute confusion array\n\t\t\tconfusion_array = np.zeros(n+1)\n\t\t\tfor n,l in enumerate(labels):\n\t\t\t\tconfusion_array[n] = (classes==l).sum() / len(classes)\n\n\t\t\t#Return\n\t\t\treturn confusion_array\n\t\t\n\t\telse:\t\n\t\t\t#Return\n\t\t\treturn classes\n\n\t###############################################################################################################################################################\n\n\tdef parameter_covariance(self,simulated_features_covariance,correct=None,observed_features_covariance=None):\n\n\t\t\"\"\"\n\t\tComputes the parameter covariance matrix using the associated features, that in the end allows to compute the parameter confidence contours (around the fiducial value)\n\n\t\t:param simulated_features_covariance: covariance matrix of the simulated features, must be provided for a correct fit!\n\t\t:type simulated_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:param observed_features_covariance: covariance matrix of the simulated features, if different from the simulated one; if None the simulated feature covariance is used\n\t\t:type observed_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:returns: 2 dimensional array with the parameter covariance matrix of the analysis\n\n\t\t\"\"\"\n\n\t\tassert simulated_features_covariance is not None,\"No science without the covariance matrix, you must provide one!\"\n\n\t\t#Cast pandas types\n\t\tif isinstance(simulated_features_covariance,pd.DataFrame):\n\t\t\tsimulated_features_covariance = simulated_features_covariance.values\n\n\t\tif (observed_features_covariance is not None) and (isinstance(observed_features_covariance,pd.DataFrame)):\n\t\t\tobserved_features_covariance = observed_features_covariance.values\n\n\t\t#Check for correct shape of input\n\t\tassert simulated_features_covariance.shape == self.feature_set.shape[1:] * 2 or simulated_features_covariance.shape == self.feature_set.shape[1:]\n\n\t\t#Linear algebra manipulations (parameters = M x features)\n\t\tif simulated_features_covariance.shape == self.feature_set.shape[1:] * 2:\n\t\t\tY = np.linalg.solve(simulated_features_covariance,self.derivatives.values.transpose())\n\t\telse:\n\t\t\tY = (1/simulated_features_covariance[:,np.newaxis]) * self.derivatives.values.transpose()\n\t\t\n\t\tXY = np.dot(self.derivatives.values,Y)\n\n\t\t#If we are using the same covariance matrix for observations and simulations, then XY is the Fisher matrix; otherwise we need to compute M too\n\t\tif observed_features_covariance is None:\n\t\t\tif correct is not None:\n\t\t\t\treturn self.__class__(np.linalg.inv(XY),index=self.derivatives.index,columns=self.derivatives.index) / precision_bias_correction(correct,len(simulated_features_covariance))\n\t\t\telse:\n\t\t\t\treturn 
self.__class__(np.linalg.inv(XY),index=self.derivatives.index,columns=self.derivatives.index)\n\t\telse:\n\n\t\t\tassert observed_features_covariance.shape == self.feature_set.shape[1:] * 2 or observed_features_covariance.shape == self.feature_set.shape[1:]\n\t\t\t\n\t\t\tM = np.linalg.solve(XY,Y.transpose())\n\t\t\t\n\t\t\tif observed_features_covariance.shape == self.feature_set.shape[1:] * 2:\n\t\t\t\tparameter_covariance = np.dot(M,np.dot(observed_features_covariance,M.transpose()))\n\t\t\telse:\n\t\t\t\tparameter_covariance = np.dot(M * observed_features_covariance,M.transpose())\n\n\t\t\treturn self.__class__(parameter_covariance,index=self.derivatives.index,columns=self.derivatives.index)\n\n\t\n\tdef fisher_matrix(self,simulated_features_covariance,correct=None,observed_features_covariance=None):\n\n\t\t\"\"\"\n\t\tComputes the parameter Fisher matrix using the associated features, that in the end allows to compute the parameter confidence contours (around the fiducial value)\n\n\t\t:param simulated_features_covariance: covariance matrix of the simulated features, must be provided for a correct fit!\n\t\t:type simulated_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:param observed_features_covariance: covariance matrix of the simulated features, if different from the simulated one; if None the simulated feature covariance is used\n\t\t:type observed_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:returns: 2 dimensional array with the Fisher matrix of the analysis\n\n\t\t\"\"\"\n\n\t\tparcov = self.parameter_covariance(simulated_features_covariance,correct,observed_features_covariance)\n\t\treturn self.__class__(np.linalg.inv(parcov.values),index=parcov.index,columns=parcov.columns)\n\n\n\tdef confidence_ellipse(self,simulated_features_covariance,correct=None,observed_feature=None,observed_features_covariance=None,parameters=[\"Om\",\"w\"],p_value=0.684,**kwargs):\n\n\t\t\"\"\"\n\n\t\tDraws a confidence ellipse of a specified p-value in parameter space, corresponding to fit an observed feature for the cosmological parameters\n\n\t\t:param observed_feature: observed feature to fit, the last dimension must have the same shape as self.feature_set[0]\n\t\t:type observed_feature: array\n\n\t\t:param simulated_features_covariance: covariance matrix of the simulated features, must be provided for a correct fit!\n\t\t:type simulated_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:param observed_features_covariance: covariance matrix of the simulated features, if different from the simulated one; if None the simulated feature covariance is used\n\t\t:type observed_features_covariance: 2 dimensional array (or 1 dimensional if assumed diagonal)\n\n\t\t:param parameters: parameters to compute the confidence contour of\n\t\t:type parameters: list.\n\n\t\t:param p_value: p-value to calculate\n\t\t:type p_value: float.\n\n\t\t:param kwargs: the keyword arguments are passed to the matplotlib Ellipse method\n\t\t:type kwargs: dict.\n\n\t\t:returns: matplotlib ellipse object\n\t\t:rtype: Ellipse\n\n\t\t\"\"\"\n\n\t\tif 
len(parameters)!=2:\n\t\t\traise ValueError(\"You must specify exactly two parameters to draw the ellipse of!\")\n\n\t\t#If the observed feature is not provided, the center of the ellipse is (0,0)\n\t\tif observed_feature is None:\n\t\t\tcenter = (0,0)\n\t\telse:\n\t\t\t#Fit the observed feature and put the center here\n\t\t\tp_fit = self.fit(observed_feature,simulated_features_covariance)\n\t\t\tcenter = tuple(p_fit[parameters])\n\n\t\t#The parameter covariance sets the size and orientation of the ellipse\n\t\tp_cov = self.parameter_covariance(simulated_features_covariance,correct,observed_features_covariance)[parameters].loc[parameters]\n\t\t\n\t\t#Return Ellipse object to user\n\t\treturn self.ellipse(center,p_cov.values,p_value,**kwargs)\n\n\n\t###########################################################################################################################################\n\n\n\tdef reparametrize(self,formatter,*args,**kwargs):\n\n\t\t#Call the parent method\n\t\tsuper(FisherAnalysis,self).reparametrize(formatter,*args,**kwargs)\n\n\t\t#Check that the format of the parameter set is valid\n\t\tself.check()\n\n\nclass FisherPanel(Panel):\n\n\t@property \n\tdef _constructor_sliced(self):\n\t\treturn FisherAnalysis\n\n\n#######################################################\n#############Full analysis#############################\n#######################################################\n\nclass EmulatorSeries(Series):\n\n\t@property\n\tdef _constructor_expanddim(self):\n\t\treturn Emulator\n\nclass Emulator(Analysis):\n\n\t_analysis_type = \"Emulator\"\n\n\t\"\"\"\n\tThe class handler of a full likelihood analysis; the parameter likelihood function is calculated with an interpolation of various kind between simulation points\n\n\t\"\"\"\n\n\t################################################################\n\t##############DataFrame subclassing#############################\n\t################################################################\n\n\t@property \n\tdef _constructor_sliced(self):\n\t\treturn EmulatorSeries\n\n\t@property\n\tdef _constructor_expanddim(self):\n\t\treturn EmulatorPanel\n\n\t##################################\n\t########Constructor###############\n\t##################################\n\n\tdef __init__(self,*args,**kwargs):\n\t\t\n\t\tsuper(Emulator,self).__init__(*args,**kwargs) \n\t\tself._likelihood_function = gaussian_likelihood\n\n\t\tif \"_likelihood_function\" not in self._metadata:\n\t\t\tself._metadata.append(\"_likelihood_function\")\n\n\t#######################################################################################################################################\n\n\tdef approximate_linear(self,center,derivative_precision=0.1):\n\n\t\t\"\"\"\n\t\tConstruct a FisherAnalysis by approximating the Emulator as a linear expansion along a chosen center\n\n\t\t:param center: center point in parameter space\n\t\t:type center: Series\n\n\t\t:param derivative_precision: percentage step for the finite difference derivatives\n\t\t:type derivative_precision: float.\n\n\t\t:returns: linearly approximated Fisher analysis\n\t\t:rtype: FisherAnalysis\n\n\t\t\"\"\"\n\n\t\tnpar = len(self.parameter_names)\n\n\t\t#Construct the parameter set for the Fisher Analysis\n\t\tparameters_fisher = Ensemble(np.zeros((npar+1,npar)),columns=self.parameter_names)\n\n\t\t#Fiducial\n\t\tfor n in range(len(parameters_fisher)):\n\t\t\tparameters_fisher.iloc[n] = center\n\n\t\t#Variations\n\t\tfor n in range(1,len(parameters_fisher)):\n\t\t\tparameters_fisher.iloc[n,n-1] += 
np.abs(parameters_fisher.iloc[n,n-1])*derivative_precision\n\n\t\t#Predict the features with the emulator, build the Fisher Analysis\n\t\tfeatures = self.predict(parameters_fisher)\n\t\tparameters_fisher.add_name(\"parameters\")\n\n\t\treturn FisherAnalysis.from_features(features,parameters=parameters_fisher)\n\n\t#######################################################################################################################################\n\n\tdef set_likelihood(self,function=None):\n\n\t\t\"\"\"\n\t\tSets the likelihood function to a custom function input by the user: the default is the usual exp(-0.5*chi^2)\n\n\t\t\"\"\"\n\n\t\tassert function is not None\n\t\tself._likelihood_function = function\n\n\tdef train(self,use_parameters=\"all\",method=\"Rbf\",**kwargs):\n\n\t\t\"\"\"\n\t\tBuilds the interpolators for each of the feature bins using a radial basis function approach\n\n\t\t:param use_parameters: which parameters actually vary in the supplied parameter set (it doesn't make sense to interpolate over the constant ones)\n\t\t:type use_parameters: list. or \"all\"\n\n\t\t:param method: interpolation method; can be 'Rbf' or callable. If callable, it must take two arguments, a square distance and a square length smoothing scale\n\t\t:type method: str. or callable\n\n\t\t:param kwargs: keyword arguments to be passed to the interpolator constructor\n\n\t\t\"\"\"\n\n\t\t#input sanity check\n\t\tif use_parameters != \"all\":\n\t\t\tassert type(use_parameters) == list\n\t\t\tused_parameters = self.parameter_set[:,use_parameters]\n\t\telse:\n\t\t\tused_parameters = self.parameter_set\n\n\t\t#Compute total number of feature bins and reshape the training set accordingly\n\t\tif \"_num_bins\" not in self._metadata:\n\t\t\tself._metadata.append(\"_num_bins\")\n\t\tself._num_bins = reduce(mul,self.feature_set.shape[1:])\n\n\t\tflattened_feature_set = self.feature_set.reshape((self.feature_set.shape[0],self._num_bins))\n\n\t\t#Build the interpolator\n\t\tif \"_interpolator\" not in self._metadata:\n\t\t\tself._metadata.append(\"_interpolator\")\n\n\t\tif method==\"Rbf\":\n\n\t\t\t#Scipy Rbf method\n\t\t\tself._interpolator = list()\n\n\t\t\tfor n in range(self._num_bins):\n\t\t\t\tself._interpolator.append(_interpolate_wrapper(interpolate.Rbf,args=(tuple(used_parameters.T) + (flattened_feature_set[:,n],)),kwargs=kwargs))\n\n\t\telse:\n\n\t\t\t#Compute pairwise square distance between points\n\t\t\tdistances = ((used_parameters[None] - used_parameters[:,None])**2).sum(-1)\n\t\t\tepsilon = distances[np.triu_indices(len(distances),k=1)].mean()\n\t\t\tkernel = method(distances,epsilon)\n\t\t\tweights = np.linalg.solve(kernel,self.feature_set)\n\n\t\t\t#Wrap interpolator\n\t\t\tself._interpolator = _function_wrapper(_interpolate_fast,args=[],kwargs={\"parameter_grid\":used_parameters,\"method\":method,\"weights\":weights,\"epsilon\":epsilon})\n\n\n\t###############################################################################################################################################################\n\n\tdef predict(self,parameters,raw=False):\n\n\t\t\"\"\"\n\t\tPredicts the feature at a new point in parameter space using the bin interpolators, trained with the simulated features\n\n\t\t:param parameters: new points in parameter space on which to compute the chi2 statistic; it's a (N,p) array where N is the number of points and p the number of parameters, or array of size p if there is only one point\n\t\t:type parameters: array \n\n\t\t:param raw: if True returns raw numpy 
arrays\n\t\t:type raw: bool.\n\n\t\t:returns: predicted features\n\t\t:rtype: array or :py:class:`Ensemble`\n\n\t\t\"\"\"\n\n\t\t#If you didn't do training before, train now with the default settings\n\t\tif not hasattr(self,\"_interpolator\"):\n\t\t\tself.train()\n\n\t\t#Cast DataFrames to numpy arrays\n\t\tif isinstance(parameters,pd.DataFrame):\n\t\t\tassert (parameters.columns==self[\"parameters\"].columns).all(),\"Parameters do not match!\"\n\t\t\tparameters = parameters.values\n\t\telif isinstance(parameters,pd.Series):\n\t\t\tassert (parameters.index==self[\"parameters\"].columns).all(),\"Parameters do not match!\"\n\t\t\tparameters = parameters.values\n\n\t\t#####################################\n\t\t#Interpolate to compute the features#\n\t\t#####################################\n\n\t\tinterpolated_feature = _predict(parameters,self._interpolator)\n\n\t\t############################################################################################\n\n\t\t#Return the result\n\t\tif raw:\n\t\t\treturn interpolated_feature\n\t\telse:\n\t\t\tif parameters.ndim==1:\n\t\t\t\treturn Series(interpolated_feature.reshape(self.feature_set.shape[1:]),index=self[self.feature_names].columns)\n\t\t\telse:\n\t\t\t\treturn Ensemble(interpolated_feature.reshape((parameters.shape[0],) + self.feature_set.shape[1:]),columns=self[self.feature_names].columns)\n\n\n\t###############################################################################################################################################################\n\n\n\tdef chi2(self,parameters,observed_feature,features_covariance,correct=None,split_chunks=None,pool=None):\n\n\t\t\"\"\"\n\t\tComputes the chi2 part of the parameter likelihood with the usual sandwich product with the covariance matrix; the model features are computed with the interpolators\n\n\t\t:param parameters: new points in parameter space on which to compute the chi2 statistic\n\t\t:type parameters: (N,p) array where N is the number of points and p the number of parameters\n\n\t\t:param observed_feature: observed feature on which to condition the parameter likelihood\n\t\t:type observed_feature: array\n\n\t\t:param features_covariance: covariance matrix of the features, must be supplied\n\t\t:type features_covariance: array\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:param split_chunks: if set to an integer bigger than 0, splits the calculation of the chi2 into subsequent chunks, each that takes care of an equal number of points. 
Each chunk could be taken care of by a different processor\n\t\t:type split_chunks: int.\n\n\t\t:returns: array with the chi2 values, with the same shape of the parameters input\n\n\t\t\"\"\"\n\n\t\t#Sanity checks\n\t\tassert observed_feature is not None \n\t\tassert features_covariance is not None,\"No science without the covariance matrix, you must provide one!\"\n\t\tassert observed_feature.shape == self.feature_set.shape[1:]\n\t\tassert features_covariance.shape == observed_feature.shape * 2\n\n\t\t#If you didn't do training before, train now with the default settings\n\t\tif not hasattr(self,\"_interpolator\"):\n\t\t\tself.train()\n\n\t\t#Reformat the parameter input into a list of chunks\n\t\tif parameters.ndim==1:\n\t\t\tnum_points = 1\n\t\telse:\n\t\t\tnum_points = parameters.shape[0]\n\n\t\tif split_chunks is None:\n\t\t\t\n\t\t\tparameter_chunks = [parameters]\n\t\t\n\t\telif split_chunks > 0:\n\t\t\t\n\t\t\tassert num_points%split_chunks == 0,\"split_chunks must divide exactly the number of points!!\"\n\t\t\tchunk_length = num_points//split_chunks\n\t\t\tparameter_chunks = [ parameters[n*chunk_length:(n+1)*chunk_length] for n in range(split_chunks) ]\n\n\t\telse:\n\n\t\t\traise ValueError(\"split_chunks must be >0!!\")\n\n\t\t#Compute the inverse of the covariance matrix once and for all\n\t\tcovinv = np.linalg.inv(features_covariance)\n\t\tif correct is not None:\n\t\t\tcovinv *= precision_bias_correction(correct,len(covinv))\n\n\t\t#Build the keyword argument dictionary to be passed to the chi2 calculator\n\t\tkwargs = {\"interpolator\":self._interpolator,\"inverse_covariance\":covinv,\"observed_feature\":observed_feature}\n\n\t\t#Hack to make the chi2 pickleable (from emcee)\n\t\tchi2_wrapper = _function_wrapper(chi2,tuple(),kwargs)\n\n\t\t#Finally map chi2 calculator on the list of chunks\n\t\tif pool is not None:\n\t\t\tM = pool.map\n\t\telse:\n\t\t\tM = map\n\t\t\n\t\tchi2_list = list(M(chi2_wrapper,parameter_chunks))\n\n\t\treturn np.array(chi2_list).reshape(num_points)\n\n\n\tdef chi2Contributions(self,parameters,observed_feature,features_covariance,correct=None): \n\n\t\t\"\"\"\n\t\tComputes the individual contributions of each feature bin to the chi2; the model features are computed with the interpolators. 
The full chi2 is the sum of the individual contributions\n\n\t\t:param parameters: new points in parameter space on which to compute the chi2 statistic\n\t\t:type parameters: (N,p) array where N is the number of points and p the number of parameters\n\n\t\t:param observed_feature: observed feature on which to condition the parameter likelihood\n\t\t:type observed_feature: array\n\n\t\t:param features_covariance: covariance matrix of the features, must be supplied\n\t\t:type features_covariance: array\n\n\t\t:param correct: if not None, correct for the bias in the inverse covariance estimator assuming the covariance was estimated by 'correct' simulations\n\t\t:type correct: int.\n\n\t\t:returns: numpy 2D array with the contributions to the chi2 (off diagonal elements are the contributions of the cross correlation between bins)\n\n\t\t\"\"\"\n\n\t\t#Sanity checks\n\t\tassert observed_feature is not None \n\t\tassert features_covariance is not None,\"No science without the covariance matrix, you must provide one!\"\n\t\tassert observed_feature.shape == self.feature_set.shape[1:]\n\t\tassert features_covariance.shape == observed_feature.shape * 2\n\n\t\t#If you didn't do training before, train now with the default settings\n\t\tif not hasattr(self,\"_interpolator\"):\n\t\t\tself.train()\n\n\t\t#Compute each bin contribution to the chi2\n\t\tresiduals = observed_feature - self.predict(parameters)\n\n\t\t#Compute the inverse covariance\n\t\tcovinv = np.linalg.inv(features_covariance)\n\t\tif correct is not None:\n\t\t\tcovinv *= precision_bias_correction(correct,len(covinv))\n\n\t\t#Compute the hits map\n\t\treturn np.outer(residuals,residuals) * covinv\n\n\n\n\tdef likelihood(self,chi2_value,**kwargs):\n\n\t\t\"\"\"\n\t\tComputes the likelihood value with the selected likelihood function, given the pre-computed chi2 value\n\n\t\t:param chi2_value: chi squared values \n\t\t:type chi2_value: array\n\n\t\t:param kwargs: keyword arguments to be passed to your likelihood function\n\n\t\t\"\"\"\n\n\t\treturn self._likelihood_function(chi2_value,**kwargs)\n\n\t############################################################################################################################################\n\n\tdef score(self,parameters,observed_feature,method=\"chi2\",**kwargs):\n\n\t\t\"\"\"\n\t\tCompute the score for an observed feature for each combination of the proposed parameters\n\n\t\t:param parameters: parameter combinations to score\n \t\t:type parameters: DataFrame or array\n\n \t\t:param observed_feature: observed feature to score\n \t\t:type observed_feature: Series\n\n \t\t:param method: scoring method to use (defaults to chi2): if callable, must take in the current instance, the parameter array and the observed feature and return a score for each parameter combination\n \t\t:type method: str. 
or callable\n\n \t\t:param kwargs: keyword arguments passed to the callable method\n \t\t:type kwargs: dict.\n\n\t\t:returns: ensemble with the scores, for each feature\n\t\t:rtype: :py:class:`Ensemble`\n\n\t\t\"\"\"\n\n\t\t#Get the names of the features to use\n\t\tfeature_names = list(observed_feature.index.levels[0])\n\t\ttry:\n\t\t\tfeature_names.remove(\"parameters\")\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\t#Check that the observed feature columns and the Emulator columns correspond\n\t\tfor c in feature_names:\n\t\t\tassert c in self.feature_names,\"Feature '{0}' is not present in the Emulator!\".format(c)\n\n\t\t#Reorder the parameters according to the ones in the Emulator\n\t\tparameters = parameters[self.parameter_names]\n\n\t\t#If the method is chi2, a covariance matrix must be provided\n\t\tif method==\"chi2\":\n\t\t\tassert \"features_covariance\" in kwargs.keys()\n\t\t\tfeatures_covariance = kwargs[\"features_covariance\"]\n\t\t\tdel(kwargs[\"features_covariance\"])\n\t\t\tassert (features_covariance.index==observed_feature.index).all()\n\t\t\tassert (features_covariance.columns==observed_feature.index).all()\n\n\t\t#Build an Ensemble with the parameters\n\t\tscore_ensemble = Ensemble(parameters)\n\n\t\t#For each feature, compute the score\n\t\tfor c in feature_names:\n\n\t\t\t#Isolate the Emulator that concerns this feature only\n\t\t\tsub_emulator = self.features(c)\n\t\t\tsub_emulator_columns = sub_emulator[c].columns\n\n\t\t\t#Isolate the observed sub_feature\n\t\t\tsub_feature = observed_feature[c][sub_emulator_columns].values\n\n\t\t\t#If the method is chi2, use the already implemented version of it\n\t\t\tif method==\"chi2\":\n\t\t\t\tsub_emulator.train()\n\t\t\t\tsub_feature_covariance = features_covariance[c][sub_emulator_columns].loc[c].loc[sub_emulator_columns].values\n\t\t\t\tscore_ensemble[c] = sub_emulator.chi2(parameters=parameters.values,observed_feature=sub_feature,features_covariance=sub_feature_covariance,**kwargs)\n\t\t\telse:\n\t\t\t\tscore_ensemble[c] = method(sub_emulator,parameters.values,sub_feature,**kwargs)\n\n\t\t#Return the score ensemble\n\t\treturn score_ensemble\n\n\t############################################################################################################################################\n\n\tdef sample_posterior(self,observed_feature,sample=\"emcee\",**kwargs):\n\n\t\t\"\"\"\n\t\tSample the parameter posterior distribution\n\n \t\t:param observed_feature: observed feature to score\n \t\t:type observed_feature: Series\n\n \t\t:param sample: posterior sampling method\n \t\t:type sample: str. or callable\n\n \t\t:returns: samples from the posterior distribution\n \t\t:rtype: dict. 
\n\n\t\t\"\"\"\n\n\t\tif sample==\"emcee\":\n\t\t\tsample = samplers.emcee_sampler\n\n\t\t#Get the names of the features to use\n\t\tfeature_names = list(observed_feature.index.levels[0])\n\t\ttry:\n\t\t\tfeature_names.remove(\"parameters\")\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\t#Check that the observed feature columns and the Emulator columns correspond\n\t\tfor c in feature_names:\n\t\t\tassert c in self.feature_names,\"Feature '{0}' is not present in the Emulator!\".format(c)\n\n\t\t#Check if the user provides a covariance matrix\n\t\tif \"features_covariance\" in kwargs.keys():\n\t\t\tfeatures_covariance = kwargs[\"features_covariance\"]\n\t\t\tdel(kwargs[\"features_covariance\"])\n\t\t\tassert (features_covariance.index==observed_feature.index).all()\n\t\t\tassert (features_covariance.columns==observed_feature.index).all()\n\t\telse:\n\t\t\tfeatures_covariance = None\n\n\t\t#Parameter samples\n\t\tsamples = dict()\n\n\t\t#For each feature, compute the score\n\t\tfor c in feature_names:\n\n\t\t\t#Isolate the Emulator that concerns this feature only\n\t\t\tsub_emulator = self.features(c)\n\t\t\tsub_emulator_columns = sub_emulator[c].columns\n\n\t\t\t#Isolate the observed sub_feature\n\t\t\tsub_feature = observed_feature[c][sub_emulator_columns].values\n\n\t\t\t#Train the emulator\n\t\t\tsub_emulator.train()\n\n\t\t\t#Isolate the sub feature covariance matrix, proceed with the sampling\n\t\t\tif features_covariance is not None:\n\t\t\t\tsub_feature_covariance = features_covariance[c][sub_emulator_columns].loc[c].loc[sub_emulator_columns].values\n\t\t\t\tsamples[c] = sample(emulator=sub_emulator,observed_feature=sub_feature,features_covariance=sub_feature_covariance,**kwargs)\n\t\t\telse:\n\t\t\t\tsamples[c] = sample(emulator=sub_emulator,observed_feature=sub_feature,**kwargs)\n\n\t\t#Done, return the sampled points\n\t\treturn samples\n\t\t\t\t\n\n\t############################################################################################################################################\n\n\tdef set_to_model(self,parameters):\n\n\t\t\"\"\"\n\t\tSet the current model of the emulator to the one specified by the parameter set\n\n\t\t:param parameters: parameters for which the feature will be emulated\n\t\t:type parameters: array.\n\n\t\t\"\"\"\n\n\t\t#assert parameters.shape[0]==self.parameter_set.shape[1]\n\t\t\n\t\t#if not hasattr(self,\"_interpolator\"):\n\t\t#\tself.train()\n\t\t\n\t\t#self._current_model_parameters = parameters\n\t\t#self._current_predicted_feature = self.predict(parameters)\n\t\t#self._current_interpolated_feature = interp1d(self.feature_label,self._current_predicted_feature)\n\n\t\traise NotImplementedError\n\n\n\tdef emulate(self,new_feature_label):\n\n\t\t\"\"\"\n\t\tCompute an emulated feature at the new feature label specified (multipoles, thresholds, ...) 
for the current model, using a linear interpolation between bins\n\n\t\t:param new_feature_label: new feature label for which you want to emulate the feature\n\t\t:type new_feature_label: array.\n\n\t\t:returns: the emulated feature\n\n\t\t\"\"\" \n\n\t\t#return self._current_interpolated_feature(new_feature_label)\n\t\traise NotImplementedError\n\n\nclass EmulatorPanel(Panel):\n\n\t@property \n\tdef _constructor_sliced(self):\n\t\treturn Emulator\n\n#########################################################################################################################################################################################\n\n###########################################################################\n###########Hack to make scipy interpolate objects pickleable###############\n###########################################################################\n\nclass _interpolate_wrapper(object):\n\n\tdef __init__(self,f,args,kwargs):\n\t\tself.f = f\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\n\tdef __call__(self):\n\t\ttry:\n\t\t\treturn self.f(*self.args,**self.kwargs)\n\t\texcept:\n\t\t\timport traceback\n\t\t\tprint(\"lenstools: Exception while building the interpolators\")\n\t\t\tprint(\" exception:\")\n\t\t\ttraceback.print_exc()\n\t\t\traise \n" ]
[ [ "numpy.dot", "numpy.arctan", "numpy.squeeze", "numpy.zeros_like", "numpy.exp", "numpy.where", "numpy.hstack", "scipy.stats.chi2", "pandas.Index", "numpy.outer", "numpy.zeros", "numpy.isclose", "numpy.linalg.inv", "numpy.atleast_2d", "numpy.linalg.eigh", "numpy.array", "matplotlib.patches.Ellipse", "numpy.linalg.solve", "numpy.abs" ] ]
frehage/AoC-2021
[ "4ab48763a72c24e2ab983ac2263647db03225297" ]
[ "14/solution.py" ]
[ "import pathlib\nimport numpy as np\n\ntest_data = 0\n\noriginal_pattern = \"\"\ninjections = dict()\n\npath = str(pathlib.Path(__file__).parent.resolve())\nwith open(path+\"/data{}.csv\".format(\"_test\" if test_data else \"\"), 'r') as file:\n lines = file.read().splitlines()\n original_pattern = lines[0]\n for line in lines[2:]:\n injections[line[:2]] = line[6]\nprint(\"Pattern:\", original_pattern)\n# for pair, injection in injections.items(): print(pair, \"->\", injection)\n\n\nprint(\"###### TASK 1 ######\")\nsteps = 10\npattern = original_pattern\nfor s in range(steps):\n next_pattern = pattern[0] + \"\".join([injections.get(pattern[i-1]+pattern[i], \"\") + pattern[i] for i in range(1,len(pattern))])\n pattern = next_pattern\npattern_array =np.array(list(pattern)) \nvalues, counts = np.unique(pattern_array, return_counts=True)\nanswer = max(counts)-min(counts)\nprint(\"Answer: \", answer)\n\n\n\nprint(\"###### TASK 2 ######\")\nsteps = 40\npattern = original_pattern\npairs = dict()\nfor i in range(1,len(pattern)):\n pairs[pattern[i-1:i+1]] = pairs.get(pattern[i-1:i+1],0) + 1\nfor _ in range(steps):\n next = dict()\n for pair, count in pairs.items():\n if injection := injections.get(pair, \"\"):\n next[pair[0]+injection] = next.get(pair[0]+injection,0) + count\n next[injection+pair[1]] = next.get(injection+pair[1],0) + count\n else:\n next[pair] = next.get(pair,0) + count\n pairs = next\ncounts = {pattern[0]: 1, pattern[-1]: 1}\nfor pair, count in pairs.items():\n for i in range(2):\n counts[pair[i]] = counts.get(pair[i],0) + count\nanswer = (max(counts.values())-min(counts.values()))//2\nprint(\"Answer: \", answer)" ]
[ [ "numpy.unique" ] ]
sametz/secondorder
[ "ad07f003f2045a474312abfe1510a382ae20d3aa" ]
[ "secondorder/initialize.py" ]
[ "\"\"\"\nThis module uses the default WINDNMR spinsystem variables for 3-spin through\n8-spin second-order calculations, plus the default AB quartet variables for \n2-spin calculations, and creates a list of (frequency, J couplings) tuples. \nThe WINDNMR defaults were chosen because they allow secondorder's output to be \nvisually checked against WINDNMR's output.\n\nThe frequencies v are in numpy arrays.\nThe J couplings are in sparse matrices. J[i,j] corresponds to the coupling\nbetween nuclei i and j, using the same ordering of nuclei as the frequencies\nin v.\nThe list of spinsystems begins with empty tuples. This allows intuitive\naccess to a particular spin system. So, spinsystem[4] is the data for the\n4-spin system.\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import lil_matrix\n\n\ndef spin2():\n v = np.array([150-7.5, 150+7.5])\n J = lil_matrix((2, 2))\n J[0, 1] = 12\n J = J + J.T\n return v, J\n\n\ndef spin3():\n v = np.array([115, 140, 190])\n J = lil_matrix((3, 3))\n J[0, 1] = 6\n J[0, 2] = 12\n J[1, 2] = 3\n J = J + J.T\n return v, J\n\n\ndef spin4():\n v = np.array([105, 140, 180, 205])\n J = lil_matrix((4, 4))\n J[0, 1] = -12\n J[0, 2] = 6\n J[0, 3] = 8\n J[1, 2] = 3\n J[1, 3] = 3\n # J[2, 3] = 0\n J = J + J.T\n return v, J\n\n\ndef spin5():\n v = np.array([105, 140, 180, 205, 225])\n J = lil_matrix((5, 5))\n J[0, 1] = -12\n J[0, 2] = 6\n # J[0, 3] = 0\n J[0, 4] = 2\n J[1, 2] = 3\n # J[1, 3] = 0\n J[1, 4] = 14\n J[2, 3] = 1\n # J[2, 4] = 0\n J[3, 4] = 1.5\n J = J + J.T\n return v, J\n\n\ndef spin6():\n v = np.array([105, 140, 180, 205, 225, 235])\n J = lil_matrix((6, 6))\n J[0, 1] = -12\n J[0, 2] = 6\n # J[0, 3] = 0\n J[0, 4] = 2\n # J[0, 5] = 0\n J[1, 2] = 3\n # J[1, 3] = 0\n J[1, 4] = 14\n J[1, 5] = 6\n J[2, 3] = 1\n # J[2, 4] = 0\n J[2, 5] = 3\n J[3, 4] = 1.5\n J[3, 5] = 5\n J[4, 5] = 2\n J = J + J.T\n return v, J\n\n\ndef spin7():\n v = np.array([105, 140, 180, 205, 225, 235, 255])\n J = lil_matrix((7, 7))\n J[0, 1] = -12\n J[0, 2] = 6\n # J[0, 3] = 0\n J[0, 4] = 2\n # J[0, 5] = 0\n # J[0, 6] = 0\n J[1, 2] = 3\n # J[1, 3] = 0\n J[1, 4] = 14\n J[1, 5] = 6\n # J[1, 6] = 0\n J[2, 3] = 1\n # J[2, 4] = 0\n J[2, 5] = 3\n # J[2, 6] = 0\n J[3, 4] = 1.5\n J[3, 5] = 5\n # J[3, 6] = 0\n J[4, 5] = 2\n # J[4, 6] = 0\n J[5, 6] = 2\n J = J + J.T\n return v, J\n\n\ndef spin8():\n v = np.array([85, 120, 160, 185, 205, 215, 235, 260])\n J = lil_matrix((8, 8))\n J[0, 1] = -12\n J[0, 2] = 6\n J[0, 3] = 2\n # J[0, 4] = 0\n # J[0, 5] = 0\n # J[0, 6] = 0\n # J[0, 7] = 0\n # J[1, 2] = 0\n # J[1, 3] = 0\n J[1, 4] = 14\n # J[1, 5] = 0\n # J[1, 6] = 0\n J[1, 7] = 3\n # J[2, 3] = 0\n # J[2, 4] = 0\n J[2, 5] = 3\n # J[2, 6] = 0\n # J[2, 7] = 0\n # J[3, 4] = 0\n J[3, 5] = 5\n # J[3, 6] = 0\n # J[3, 7] = 0\n J[4, 5] = 2\n # J[4, 6] = 0\n # J[4, 7] = 0\n # J[5, 6] = 0\n # J[5, 7] = 0\n J[6, 7] = 12\n J = J + J.T\n return v, J\n\n\ndef getWINDNMRdefault(n):\n \"\"\"\n Fetches the default (frequencies, J) tuple for the n-spin second-order\n simulation.\n Currently returns a frequencies, J tuple where frequencies is a (0,\n n) 2D array (to easily work with main's ArrayBox), and J is a 2D array\n and not a sparse matrix (since sparse matrices are no longer used). 
Was\n easier to convert the above data this way than to rewrite it all.\n \"\"\"\n spinsystem = [(), (), spin2(), spin3(), spin4(), spin5(), spin6(), spin7(),\n spin8()]\n\n # Changes to modules require frequency to be a (0,n) 2D array, and J to\n # be an array and not a sparse matrix.\n freq, J = spinsystem[n]\n freq2D = np.array([freq]) # converts to 2D array\n J = J.todense()\n\n return freq2D, J\n\n\nif __name__ == '__main__':\n from model.nmrmath import nspinspec\n from model.nmrplot import nmrplot as nmrplt\n\n test_freqs, test_couplings = getWINDNMRdefault(8)\n print(test_freqs)\n print(test_couplings)\n test_spectrum = nspinspec(test_freqs[0], test_couplings)\n nmrplt(test_spectrum, y=25)\n" ]
[ [ "numpy.array", "scipy.sparse.lil_matrix" ] ]
gittripley/incubator-tvm
[ "122a4930f2bf818501d67b755f0cf21c79b85a21" ]
[ "tests/python/relay/test_op_level2.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level2 operator test cases.\n\"\"\"\nimport numpy as np\nimport tvm\nfrom tvm import autotvm\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import ctx_list\nfrom tvm.contrib import util\nimport topi.testing\n\ndef run_infer_type(expr):\n mod = relay.Module.from_expr(expr)\n mod = transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(expr, relay.Function) else entry.body\n\ndef test_conv2d_infer_type():\n # symbolic in batch dimension\n n, c, h, w = tvm.var(\"n\"), 10, 224, 224\n x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"float32\"))\n w = relay.var(\"w\")\n y = relay.nn.conv2d(x, w,\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=2)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 2, 224, 224), \"float32\")\n assert yy.args[1].checked_type == relay.TensorType(\n (2, 10, 3, 3), \"float32\")\n\n # infer by shape of w, mixed precision\n n, c, h, w = tvm.var(\"n\"), 10, 224, 224\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))\n w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\"))\n y = relay.nn.conv2d(x, w, out_dtype=\"int32\")\n assert \"out_dtype=\\\"int32\\\"\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 2, 222, 222), \"int32\")\n\n # infer shape in case of different dtypes for input and weight.\n n, c, h, w = tvm.var(\"n\"), 10, 224, 224\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"uint8\"))\n w = relay.var(\"w\", relay.TensorType((2, 10, 3, 3), \"int8\"))\n y = relay.nn.conv2d(x, w, out_dtype=\"int32\")\n assert \"out_dtype=\\\"int32\\\"\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 2, 222, 222), \"int32\")\n\n # Infer with a different layout\n n, c, h, w = 4, 32, 224, 224\n x = relay.var(\"x\", relay.TensorType((n//4, c//4, h, w, 4, 4), \"int8\"))\n wt = relay.var(\"w\")\n y = relay.nn.conv2d(x, wt,\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16,\n data_layout=\"NCHW4n4c\",\n kernel_layout=\"OIHW4o4i\",\n out_dtype=\"int32\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (1, 4, 224, 224, 4, 4), \"int32\")\n assert yy.args[1].checked_type == relay.TensorType(\n (4, 8, 3, 3, 4, 4), \"int8\")\n\n # Infer with NHWC\n n, c, h, w = 4, 32, 224, 224\n x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"int8\"))\n wt = relay.var(\"w\")\n y = relay.nn.conv2d(x, wt,\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16,\n data_layout=\"NHWC\",\n out_dtype=\"int32\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, h, w, 16), \"int32\")\n\n\ndef test_conv2d_run():\n def run_test_conv2d(dtype, out_dtype, 
scale, dshape, kshape,\n padding=(1, 1),\n fref=None,\n groups=1,\n dilation=(1, 1),\n except_targets=None,\n **attrs):\n if except_targets is None:\n except_targets = []\n\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n w = relay.var(\"w\", dtype=dtype)\n y = relay.nn.conv2d(x, w,\n padding=padding,\n dilation=dilation,\n groups=groups,\n **attrs)\n func = relay.Function([x, w], y)\n data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)\n kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)\n dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)\n if fref is None:\n ref_res = topi.testing.conv2d_nchw_python(\n data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,\n groups=groups)\n else:\n ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))\n\n\n for target, ctx in ctx_list():\n if target in except_targets:\n continue\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data, kernel)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\n def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,\n padding=(1, 1),\n groups=1,\n dilation=(1, 1),\n **attrs):\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n w = relay.var(\"w\", dtype=dtype)\n y = relay.nn.conv2d(x, w,\n padding=padding,\n dilation=dilation,\n groups=groups,\n **attrs)\n func = relay.Function([x, w], y)\n mod = tvm.relay.Module()\n mod[\"main\"] = func\n\n test_schedule='{\"i\": [\"llvm -device=arm_cpu\", \"topi_nn_depthwise_conv2d_nchw\", \\\n [[\"TENSOR\", [1, 512, 32, 32], \"float32\"], \\\n [\"TENSOR\", [512, 1, 3, 3], \"float32\"], \\\n [1, 1], [1, 1], [1, 1], \"float32\"], {}, \\\n [\"depthwise_conv2d_nchw\", [1, 512, 32, 32, \"float32\"], \\\n [512, 1, 3, 3, \"float32\"], [1, 1], [1, 1], [1, 1], \"float32\"], \\\n {\"i\": 743640, \"t\": \"contrib_spatial_pack\", \"c\": null, \\\n \"e\": [[\"tile_co\", \"sp\", [512, 1]], [\"tile_oh\", \"sp\", [8, 1]], \\\n [\"tile_ow\", \"sp\", [1, 8]], \\\n [\"reorder_0\", \"re\", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \\\n [\"reorder_1\", \"re\", [0, 1, 2, 3, 6, 4, 5]], \\\n [\"ann_reduce\", \"an\", [\"unroll\", \"none\"]], \\\n [\"ann_spatial\", \"an\", [\"unroll\", \"unroll\", \"vec\"]], \\\n [\"data_pad_inline\", \"ot\", 4], [\"data_vec_inline\", \"ot\", 1], \\\n [\"conv_inline\", \"ot\", 0]]}], \"r\": [[0.0002933163], \\\n 0, 3.1976189613342285, 1570811630.6058347], \"v\": 0.1}'\n temp = util.tempdir()\n with open(temp.relpath(\"temp.log\"), \"w\") as log_file:\n log_file.write(test_schedule)\n with autotvm.apply_history_best(temp.relpath(\"temp.log\")):\n with relay.build_config(opt_level=3):\n print('Compiling...')\n graph_json, mod, params = tvm.relay.build(mod, target=\"llvm -device=arm_cpu\")\n\n # depthwise conv2d\n dshape = (1, 32, 18, 18)\n kshape = (32, 1, 3, 3)\n run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),\n fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(\n x, w, (1, 1), \"SAME\"))\n\n # depthwise conv2d for arm_cpu\n dshape = (1, 512, 32, 32)\n kshape = (512, 1, 3, 3)\n compile_test_conv2d_arm_cpu(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=512, \n groups=512, kernel_size=(3 ,3))\n\n # CUDA is disabled for 'direct' schedule:\n # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553\n # group conv2d\n dshape = (1, 32, 18, 18)\n kshape = (32, 4, 3, 3)\n run_test_conv2d(\"float32\", 
\"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3),\n except_targets=['cuda'])\n # also group conv2d\n dshape = (1, 32, 18, 18)\n kshape = (64, 1, 3, 3)\n run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3),\n except_targets=['cuda'])\n\n # normal conv2d\n dshape = (1, 3, 224, 224)\n kshape = (10, 3, 3, 3)\n run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=10, kernel_size=(3 ,3))\n # mixed precision\n run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape,\n padding=(1, 1), channels=10, kernel_size=(3 ,3))\n kshape = (10, 3, 1, 3)\n # mixed precision.\n run_test_conv2d(\"int8\", \"int32\", 1, dshape, kshape,\n padding=(0, 1), channels=10, kernel_size=(1 ,3))\n # dilated conv2d\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n run_test_conv2d(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3))\n\ndef test_conv2d_winograd():\n class WinogradFallback(autotvm.FallbackContext):\n def _query_inside(self, target, workload):\n key = (target, workload)\n if key in self.memory:\n return self.memory[key]\n cfg = autotvm.task.space.FallbackConfigEntity()\n cfg.template_key = 'winograd'\n cfg.is_fallback = False\n cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])\n cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])\n cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])\n cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1])\n cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500)\n cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1)\n self.memory[key] = cfg\n return cfg\n\n def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape,\n padding=(1, 1),\n groups=1,\n dilation=(1, 1),\n **attrs):\n\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n w = relay.var(\"w\", shape=kshape, dtype=dtype)\n y = relay.nn.conv2d(x, w,\n padding=padding,\n dilation=dilation,\n groups=groups,\n **attrs)\n func = relay.Function([x, w], y)\n mod = relay.Module()\n mod['main'] = func\n mod = relay.transform.InferType()(mod)\n\n data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)\n kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)\n ref_res = topi.testing.conv2d_nchw_python(\n data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,\n groups=groups)\n\n with WinogradFallback(), relay.build_config(opt_level=3):\n for target, ctx in ctx_list():\n if target != 'cuda':\n continue\n params = {'w': tvm.nd.array(kernel)}\n graph, lib, params = relay.build_module.build(mod, target=target, params=params)\n module = tvm.contrib.graph_runtime.create(graph, lib, ctx)\n module.set_input('x', tvm.nd.array(data))\n module.set_input(**params)\n module.run()\n op_res1 = module.get_output(0)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3)\n\n # normal winograd: stride 1, padding 1, kernel 3x3\n dshape = (1, 80, 73, 73)\n kshape = (192, 80, 3, 3)\n run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(1, 1), channels=192, kernel_size=(3, 3))\n # extended winograd: stride 1, padding N, kernel 3x3\n run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(0, 0), channels=192, kernel_size=(3, 3))\n run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(2, 2), channels=192, kernel_size=(3, 3))\n # extended winograd: stride 1, 
padding N, kernel NxN\n kshape = (192, 80, 7, 7)\n run_test_conv2d_cuda(\"float32\", \"float32\", 1, dshape, kshape,\n padding=(2, 2), channels=192, kernel_size=(7, 7))\n\n\ndef test_conv2d_transpose_infer_type():\n # symbolic in batch dimension\n n, c, h, w = tvm.var(\"n\"), 10, 10, 12\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n w = relay.var(\"w\", relay.IncompleteType())\n y = relay.nn.conv2d_transpose(x, w,\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=15)\n assert \"channels=15\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 15, 10, 12), \"float32\")\n assert yy.args[1].checked_type == relay.TensorType(\n (10, 15, 3, 3), \"float32\")\n\n # infer by shape of w, mixed precision\n n, c, h, w = tvm.var(\"n\"), 10, 10, 12\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n w = relay.var(\"w\", relay.TensorType((12, 11, 5, 5), \"float32\"))\n y = relay.nn.conv2d_transpose(x, w,\n output_padding=(1, 1),\n channels=11,\n data_layout=\"NHWC\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 15, 15, 11), \"float32\")\n\n\ndef test_conv2d_transpose_run():\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 3, 3)\n oshape = (1, 10, 37, 37)\n x = relay.var(\"x\", shape=dshape)\n w = relay.var(\"w\")\n y = relay.nn.conv2d_transpose(x, w,\n channels=10, kernel_size=(3,3), strides=(2,2),\n padding=(1,1), output_padding=(2, 2))\n func = relay.Function([x, w], y)\n dtype = \"float32\"\n data = np.random.uniform(size=dshape).astype(dtype)\n kernel = np.random.uniform(size=kshape).astype(dtype)\n c_np = topi.testing.conv2d_transpose_nchw_python(\n data, kernel, 2, 1)\n d_np = np.zeros(shape=oshape)\n d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np\n ref_res = d_np\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data, kernel)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\n\n\ndef test_upsampling_infer_type():\n n, c , h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n scale = tvm.const(2.0, \"float64\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\")\n assert \"method=\\\"bilinear\\\"\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast(\"int32\", tvm.round(h*scale)),\n tvm.expr.Cast(\"int32\", tvm.round(w*scale))),\n \"float32\")\n n, c = tvm.var(\"n\"), tvm.var(\"c\")\n x = relay.var(\"x\", relay.TensorType((n, c, 100, 200), \"float32\"))\n y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout=\"NCHW\", method=\"bilinear\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, 200, 400), \"float32\")\n\n\ndef _test_pool2d(opfunc, reffunc):\n n, c, h, w = tvm.var(\"n\"), 10, 224, 224\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = opfunc(x, pool_size=(1, 1))\n assert \"pool_size=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, 10, 224, 224), \"float32\")\n # test execution\n dtype = \"float32\"\n dshape = (1, 3, 28, 28)\n x = relay.var(\"x\", shape=dshape)\n y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))\n func = relay.Function([x], y)\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5))\n for target, ctx in ctx_list():\n 
intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\ndef _test_pool2d_int(opfunc, reffunc, dtype):\n n, c, h, w = tvm.var(\"n\"), 10, 224, 224\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n y = opfunc(x, pool_size=(1, 1))\n assert \"pool_size=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)\n # test execution\n dtype = \"int32\"\n dshape = (1, 3, 28, 28)\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))\n func = relay.Function([x], y)\n data = np.random.random_integers(low=-128, high=128, size=dshape)\n ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\ndef _test_global_pool2d(opfunc, reffunc):\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), 224, 224\n x = relay.var(\"x\", relay.TensorType((n, h, w, c), \"float32\"))\n y = opfunc(x, layout=\"NHWC\")\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, 1, 1, c), \"float32\")\n\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"float32\"))\n y = opfunc(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, 1, 1), \"float32\")\n # test execution\n dtype = \"float32\"\n dshape = (1, 1024, 7, 7)\n x = relay.var(\"x\", shape=dshape)\n y = opfunc(x)\n func = relay.Function([x], y)\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = reffunc(data, axis=(2,3), keepdims=True)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\n\ndef test_pool2d():\n _test_pool2d(relay.nn.max_pool2d, np.max)\n _test_pool2d(relay.nn.avg_pool2d, np.mean)\n _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')\n _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')\n _test_global_pool2d(relay.nn.global_max_pool2d, np.max)\n _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)\n\n\ndef test_avg_pool2d_no_count_pad():\n kh, kw = (4, 4)\n sh, sw = (2, 2)\n ph, pw = (2, 2)\n n = 1\n (ic, ih, iw) = (3, 28, 28)\n (oc, oh, ow) = (3, 15, 15)\n dshape = (n, ic, ih, iw)\n x = relay.var(\"x\", shape=dshape)\n y = relay.nn.avg_pool2d(x,\n pool_size=(kh, kw),\n strides=(sw, sw),\n padding=(ph, pw),\n count_include_pad=False)\n func = relay.Function([x], y)\n dtype = \"float32\"\n a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)\n pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)\n no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))\n pad_np[np.ix_(*no_zero)] = a_np\n b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)\n for i in range(oh):\n for j in range(ow):\n pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))\n b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],\n axis=(2,3)) / np.maximum(pad_count, 1)\n ref_res = np.maximum(b_np, 0.0)\n data = a_np\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, 
target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\ndef test_flatten_infer_type():\n d1, d2, d3, d4 = tvm.var(\"d1\"), tvm.var(\"d2\"), tvm.var(\"d3\"), tvm.var(\"d4\")\n x = relay.var(\"x\", relay.TensorType((d1, d2, d3, d4), \"float32\"))\n y = relay.nn.batch_flatten(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), \"float32\")\n\n x = relay.var(\"x\", relay.TensorType((3, 2, 4, 3), \"float32\"))\n y = relay.nn.batch_flatten(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((3, 24), \"float32\")\n\n x = relay.var(\"x\", relay.TensorType((d1, 2, d3, 3), \"float32\"))\n y = relay.nn.batch_flatten(x)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), \"float32\")\n\n shape = (1, 5, 10, 10)\n o_shape = (1, 500)\n dtype = \"float32\"\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n z = relay.nn.batch_flatten(x)\n yy = run_infer_type(z)\n assert yy.checked_type == relay.TensorType(o_shape, dtype)\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)\n ref_res = x_data.flatten().reshape(o_shape)\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\ndef test_pad_infer_type():\n # entirely concrete case\n n, c, h, w = 1, 2, 3, 4\n t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))\n assert \"pad_width=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((3, 6, 9, 12), \"float32\")\n\n # some symbolic values\n n, c, h, w = tvm.var(\"n\"), 2, 3, tvm.var(\"w\")\n t = relay.var(\"t\", relay.TensorType((n, c, h, w), \"float32\"))\n y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), \"float32\")\n\ndef test_pad_run():\n def _test_run(dtype):\n dshape = (4, 10, 7, 7)\n x = relay.var(\"x\", shape=dshape)\n y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))\n func = relay.Function([x], y)\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n\n _test_run('float32')\n _test_run('int32')\n\ndef test_lrn():\n n, c , h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n x = relay.var(\"x\", shape=(n, c , h, w))\n y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)\n assert \"alpha=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c , h, w))\n\n shape = (1, 5, 10, 10)\n dtype = \"float32\"\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n size=5\n axis=1\n bias=0.5\n alpha=.00001\n beta=0.75\n z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)\n yy = run_infer_type(z)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = 
relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)\n ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\ndef test_l2_normalize():\n n, c , h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n x = relay.var(\"x\", shape=(n, c , h, w))\n y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])\n assert \"axis=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c , h, w))\n\n shape = (1, 5, 10, 10)\n dtype = \"float32\"\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n eps=0.001\n axis=1\n z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])\n yy = run_infer_type(z)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)\n ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef batch_flatten(data):\n shape = data.shape\n target_dim = 1\n for i in range(len(shape) - 1):\n target_dim = target_dim * shape[i + 1]\n return np.reshape(data, (shape[0], target_dim))\n\n\ndef test_batch_flatten():\n t1 = relay.TensorType((5, 10, 5))\n x = relay.Var(\"x\", t1)\n func = relay.Function([x], relay.nn.batch_flatten(x))\n\n data = np.random.rand(5, 10, 5).astype(t1.dtype)\n ref_res = batch_flatten(data)\n for target, ctx in ctx_list():\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n\ndef _test_upsampling(layout, method, align_corners=False):\n n, c, h, w = tvm.var(\"n\"), 16, 32, 32\n scale_h = 2.0\n scale_w = 2.0\n dtype = \"float32\"\n def get_shape():\n if layout == \"NCHW\":\n return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w)))\n else:\n return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c)\n ishape, oshape = get_shape()\n x = relay.var(\"x\", relay.TensorType((n,) + ishape, dtype))\n y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,\n method=method, align_corners=align_corners)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)\n dshape = (1,) + ishape\n x = relay.var(\"x\", shape=dshape)\n y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,\n method=method, align_corners=align_corners)\n func = relay.Function([x], y)\n data = np.random.uniform(size=dshape).astype(dtype)\n if method == \"nearest_neighbor\":\n ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)\n else:\n ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),\n int(round(w*scale_w))), layout)\n for target, ctx in ctx_list():\n executor = 
relay.create_executor(\"graph\", ctx=ctx, target=target)\n out = executor.evaluate(func)(data)\n tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)\n\n\ndef test_upsampling():\n _test_upsampling(\"NCHW\", \"nearest_neighbor\")\n _test_upsampling(\"NCHW\", \"bilinear\", True)\n _test_upsampling(\"NHWC\", \"nearest_neighbor\")\n _test_upsampling(\"NHWC\", \"bilinear\", True)\n\n\ndef test_conv2d_int8_intrinsics():\n def _compile(ic, oc, target, data_layout, kernel_layout, dtypes):\n input_dtype, weight_dtype, output_dtype = dtypes\n\n n, h, w, ch, cw = 1, 64, 64, 3, 3\n if data_layout == 'NCHW':\n data_shape = (n, ic, h, w)\n x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype))\n elif data_layout == 'NHWC':\n data_shape = (n, h, w, ic)\n x = relay.var(\"x\", relay.TensorType(data_shape, input_dtype))\n else:\n raise ValueError('Not supported')\n\n if kernel_layout == 'OIHW':\n kernel_shape = (oc, ic, ch, cw)\n elif kernel_layout == 'HWIO':\n kernel_shape = (ch, cw, ic, oc)\n else:\n raise ValueError('Not supported')\n\n weight = relay.var(\"weight\", relay.TensorType(kernel_shape, weight_dtype))\n y = relay.nn.conv2d(x, weight,\n kernel_size=(ch, cw),\n channels=oc,\n padding=(1, 1),\n dilation=(1, 1),\n data_layout=data_layout,\n kernel_layout=kernel_layout,\n out_dtype=output_dtype)\n func = relay.Function([x, weight], y)\n wdata = np.random.rand(*kernel_shape) * 10\n parameters = {\"weight\": tvm.nd.array(wdata.astype(weight_dtype))}\n\n with relay.build_config(opt_level=3):\n graph, lib, params = relay.build(func, target, params=parameters)\n\n assembly = lib.get_source(\"asm\")\n return assembly\n\n def _has_fast_int8_instructions(asm, target):\n if 'skylake-avx512' in target:\n return \"pmaddubs\" in asm\n elif 'cascadelake' in target:\n return \"vpdpbusd\" in asm\n else:\n assert False, \"Target should be Skylake or Cascadelake\"\n\n # compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions\n targets = [\"llvm -mcpu=skylake-avx512\", \"llvm -mcpu=cascadelake\"]\n llvm_version = tvm.codegen.llvm_version_major()\n for target in targets:\n if llvm_version >= 8:\n dtypes = ('uint8', 'int8', 'int32')\n # Sweep the input channels to check int8 robustness\n # Input channels should be a multiple of 4 internally.\n for ic in [1, 4, 6]:\n asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NCHW\",\n kernel_layout='OIHW',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n for ic in [1, 4, 6]:\n asm = _compile(ic=ic, oc=16, target=target, data_layout=\"NHWC\",\n kernel_layout='HWIO',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n # Sweep the output channels to check int8 robustness\n # Output channels should be a multiple of 16 internally.\n for oc in [4, 16, 20]:\n asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NCHW\",\n kernel_layout='OIHW',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n for oc in [4, 16, 20]:\n asm = _compile(ic=8, oc=oc, target=target, data_layout=\"NHWC\",\n kernel_layout='HWIO',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n # Check that both non-divisible oc and ic work\n asm = _compile(ic=17, oc=29, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n asm = _compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n # Check that 
int8 x int8 goes through legalization so that fast instructions can be picked up.\n for target in targets:\n if llvm_version >= 8:\n dtypes = ('int8', 'int8', 'int32')\n # Check that both non-divisible oc and ic work\n asm = _compile(ic=17, oc=29, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n asm = _compile(ic=17, oc=29, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',\n dtypes=dtypes)\n assert _has_fast_int8_instructions(asm, target)\n\n # Ensure that code is generated when datatypes are not HW supported.\n dtypes = ('uint8', 'uint8', 'int32')\n asm = _compile(ic=16, oc=32, target=target, data_layout=\"NHWC\", kernel_layout='HWIO',\n dtypes=dtypes)\n # Check that the intrinsic is not present in the assembly.\n assert not _has_fast_int8_instructions(asm, target)\n\n # Check that a vectorized instruction is generated for older Intel\n # generations, because we default to NCHWc layout.\n target = \"llvm -mcpu=core-avx2\"\n fast_int8_dtypes = ('uint8', 'int8', 'int32')\n asm = _compile(ic=16, oc=32, target=target, data_layout=\"NCHW\", kernel_layout='OIHW',\n dtypes=fast_int8_dtypes)\n # Check that vector int mult and add instructions are generated.\n assert \"vpmulld\" in asm and \"vpadd\" in asm\n\n\ndef test_bitserial_conv2d_infer_type():\n # Basic shape test with ambiguous batch.\n n, c, h, w = tvm.var(\"n\"), 32, 224, 224\n x = relay.var(\"x\", relay.ty.TensorType((n, c, h, w), \"int16\"))\n w = relay.var(\"w\", relay.ty.TensorType((32, 32, 3, 3), \"int16\"))\n y = relay.nn.bitserial_conv2d(\n x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (n, 32, 222, 222), \"int16\")\n\n\ndef test_bitpack_infer_type():\n # Test axis packing shape inference.\n o, i, h, w = 32, 32, 128, 128\n x = relay.var(\"x\", relay.ty.TensorType((o, i, h, w), \"int16\"))\n y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(\n (32, 2, 128, 128, 1), \"uint16\")\n\n\nif __name__ == \"__main__\":\n test_pool2d()\n test_avg_pool2d_no_count_pad()\n test_lrn()\n test_l2_normalize()\n test_conv2d_infer_type()\n test_bitpack_infer_type()\n test_upsampling_infer_type()\n test_flatten_infer_type()\n test_pad_infer_type()\n test_pad_run()\n test_conv2d_transpose_infer_type()\n test_conv2d_transpose_run()\n test_conv2d_run()\n test_conv2d_winograd()\n test_bitserial_conv2d_infer_type()\n test_batch_flatten()\n test_upsampling()\n test_conv2d_int8_intrinsics()\n" ]
[ [ "numpy.ix_", "numpy.maximum", "numpy.pad", "numpy.reshape", "numpy.random.random_integers", "numpy.random.rand", "numpy.random.uniform", "numpy.zeros", "numpy.sum" ] ]
mhu-coder/AsSteroid
[ "56dd2b81bb16c1f081b0b91e3bbb8b29dd587dbd" ]
[ "asteroid/masknn/norms.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nEPS = 1e-8\n\n\nclass _LayerNorm(nn.Module):\n \"\"\"Layer Normalization base class.\"\"\"\n def __init__(self, channel_size):\n super(_LayerNorm, self).__init__()\n self.channel_size = channel_size\n self.gamma = nn.Parameter(torch.ones(channel_size),\n requires_grad=True)\n self.beta = nn.Parameter(torch.zeros(channel_size),\n requires_grad=True)\n\n def apply_gain_and_bias(self, normed_x):\n \"\"\" Assumes input of size `[batch, chanel, *]`. \"\"\"\n return (self.gamma * normed_x.transpose(1, -1) +\n self.beta).transpose(1, -1)\n\n\nclass GlobLN(_LayerNorm):\n \"\"\"Global Layer Normalization (globLN).\"\"\"\n def forward(self, x):\n \"\"\" Applies forward pass.\n \n Works for any input size > 2D.\n\n Args:\n x (:class:`torch.Tensor`): Shape `[batch, chan, *]`\n\n Returns:\n :class:`torch.Tensor`: gLN_x `[batch, chan, *]`\n \"\"\"\n dims = list(range(1, len(x.shape)))\n mean = x.mean(dim=dims, keepdim=True)\n var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())\n\n\nclass ChanLN(_LayerNorm):\n \"\"\"Channel-wise Layer Normalization (chanLN).\"\"\"\n def forward(self, x):\n \"\"\" Applies forward pass.\n \n Works for any input size > 2D.\n\n Args:\n x (:class:`torch.Tensor`): `[batch, chan, *]`\n\n Returns:\n :class:`torch.Tensor`: chanLN_x `[batch, chan, *]`\n \"\"\"\n mean = torch.mean(x, dim=1, keepdim=True)\n var = torch.var(x, dim=1, keepdim=True, unbiased=False)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())\n\n\nclass CumLN(_LayerNorm):\n \"\"\"Cumulative Global layer normalization(cumLN).\"\"\"\n def forward(self, x):\n \"\"\"\n\n Args:\n x (:class:`torch.Tensor`): Shape `[batch, channels, length]`\n Returns:\n :class:`torch.Tensor`: cumLN_x `[batch, channels, length]`\n \"\"\"\n batch, chan, spec_len = x.size()\n cum_sum = torch.cumsum(x.sum(1, keepdim=True), dim=-1)\n cum_pow_sum = torch.cumsum(x.pow(2).sum(1, keepdim=True), dim=-1)\n cnt = torch.arange(start=chan, end=chan*(spec_len+1),\n step=chan, dtype=x.dtype).view(1, 1, -1)\n cum_mean = cum_sum / cnt\n cum_var = cum_pow_sum - cum_mean.pow(2)\n return self.apply_gain_and_bias((x - cum_mean) / (cum_var + EPS).sqrt())\n\n\nclass FeatsGlobLN(_LayerNorm):\n \"\"\"feature-wise global Layer Normalization (FeatsGlobLN).\n Applies normalization over frames for each channel.\"\"\"\n\n def forward(self, x):\n \"\"\" Applies forward pass.\n\n Works for any input size > 2D.\n\n Args:\n x (:class:`torch.Tensor`): `[batch, chan, time]`\n\n Returns:\n :class:`torch.Tensor`: chanLN_x `[batch, chan, time]`\n \"\"\"\n\n stop = len(x.size())\n dims = list(range(2, stop))\n\n mean = torch.mean(x, dim=dims, keepdim=True)\n var = torch.var(x, dim=dims, keepdim=True, unbiased=False)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())\n\n\nclass BatchNorm(_BatchNorm):\n \"\"\"Wrapper class for pytorch BatchNorm1D and BatchNorm2D\"\"\"\n def _check_input_dim(self, input):\n if input.dim() < 2 or input.dim() > 4:\n raise ValueError('expected 4D or 3D input (got {}D input)'\n .format(input.dim()))\n\n\n# Aliases.\ngLN = GlobLN\nfgLN = FeatsGlobLN\ncLN = ChanLN\ncgLN = CumLN\nbN = BatchNorm\n\n\ndef get(identifier):\n \"\"\" Returns a norm class from a string. 
Returns its input if it\n is callable (already a :class:`._LayerNorm` for example).\n\n Args:\n identifier (str or Callable or None): the norm identifier.\n\n Returns:\n :class:`._LayerNorm` or None\n \"\"\"\n if identifier is None:\n return None\n elif callable(identifier):\n return identifier\n elif isinstance(identifier, str):\n cls = globals().get(identifier)\n if cls is None:\n raise ValueError('Could not interpret normalization identifier: ' +\n str(identifier))\n return cls\n else:\n raise ValueError('Could not interpret normalization identifier: ' +\n str(identifier))\n" ]
[ [ "torch.mean", "torch.ones", "torch.zeros", "torch.arange", "torch.var", "torch.pow" ] ]
vid-nath/w210-capstone
[ "e857d371cc1f57cbf0fcf4a0a5a26b585b862707" ]
[ "recommender_bgguser.py" ]
[ "import json\nimport csv\nimport pandas as pd\nimport turicreate as tc\n'''\nrecommend based on games provided in questionnaire and filter recommended games by questionnaire answers,\ninput: json file get from webhook\noutput: json with game_id and confidence_score \n'''\n\n\ndef recommender_bgguser(dataset):\n MODEL_PATH = 'game_rec_model_750_bgguser'\n MODEL = tc.load_model(MODEL_PATH)\n dataset_json=json.dumps(dataset)\n new_obs_data = json.loads(dataset_json)\n new_obs_data_new = [new_obs_data[\"user_id\"]]\n filter_condt = tc.SFrame({\"age_min\": [new_obs_data[\"age\"][\"min\"]],\n \"age_max\": [new_obs_data[\"age\"][\"max\"]],\n \"num_players_min\": [new_obs_data[\"num_players\"][\"min\"]],\n \"num_players_max\": [new_obs_data[\"num_players\"][\"max\"]],\n \"play_time_min\": [new_obs_data[\"play_time\"][\"min\"]],\n \"play_time_max\": [new_obs_data[\"play_time\"][\"max\"]]})\n\n # Load in the model from a saved bin file, then read in the game data adn combine it with the passed in data.\n df_items = pd.read_csv('data/game_info_750.csv')\n df_items = df_items.fillna(0)\n rec_items = MODEL.recommend(new_obs_data_new, k=50)\n # Select 50 recommended games' info.\n df_rec_game_info = df_items.loc[df_items['game_id'].isin(rec_items['game_id'])]\n # Filter out game based on user answers.\n df_items_filter=df_rec_game_info[(df_rec_game_info['age_min'] >= filter_condt['age_min'])&\n (df_rec_game_info['min_ppl'] <= filter_condt['num_players_min']) &\n (df_rec_game_info['avg_time']>= filter_condt['play_time_min'])&(df_rec_game_info['avg_time']<= filter_condt['play_time_max'])]\n# # Convert rec_items to a dataframe.\n df_rec_items = rec_items.to_dataframe()\n \n# # Output the top 5 games.\n output = df_rec_items.loc[df_rec_items['game_id'].isin(df_items_filter.game_id)].sort_values('score', ascending=False).head(5)\n output_name=df_items_filter[df_items_filter['game_id'].isin(output['game_id'])][['game_id','game_title']]\n output=pd.merge(output,output_name, on='game_id')\n\n output['level'] = output['score'].where(~(output['score']>=9),\"Extremely fits your taste\")\n output['level'] = output['level'].where(~((output['score']>=8)&(output['score']<9)),\"Very much fits your taste\")\n output['level'] = output['level'].where(~(output['score']<8),\"Fits your taste\")\n json_output=json.dumps({\"game_id\": list(output[\"game_id\"]),\"game_name\": list(output[\"game_title\"]), \"level\": list(output[\"level\"])})\n return json_output\n" ]
[ [ "pandas.merge", "pandas.read_csv" ] ]
almajo/allRank
[ "845c191ed00e112351437c8884cbe5573def9531" ]
[ "allrank/click_models/duplicate_aware.py" ]
[ "from typing import Tuple, Union\n\nimport numpy as np\nimport torch\nfrom scipy.spatial.distance import cdist\n\nfrom allrank.click_models.base import ClickModel\n\n\nclass EverythingButDuplicatesClickModel(ClickModel):\n \"\"\"\n This ClickModel clicks on every document, which was not previously clicked,\n if the distance between this document and any previous is larger than given margin in given metric\n \"\"\"\n\n def __init__(self, duplicate_margin: float = 0, metric: str = \"euclidean\"):\n \"\"\"\n\n :param duplicate_margin: a margin to tell whether a pair of documents is treated as a duplicate.\n If the distance is less than or equal to this value - this marks a duplicate\n :param metric: a metric in which pairwise distances are calculated\n (metric must be supported by `scipy.spatial.distance.cdist`)\n \"\"\"\n self.duplicate_margin = duplicate_margin\n self.metric = metric\n\n def click(self, documents: Tuple[torch.Tensor, Union[torch.Tensor, np.ndarray]]) -> np.ndarray:\n X, y = documents\n dist = cdist(X, X, metric=self.metric)\n dist = np.triu(dist, k=1)\n np.fill_diagonal(dist, np.inf)\n indices = np.tril_indices(dist.shape[0])\n dist[indices] = np.inf\n return 1 * (dist > self.duplicate_margin).min(0)\n" ]
[ [ "numpy.tril_indices", "numpy.triu", "scipy.spatial.distance.cdist", "numpy.fill_diagonal" ] ]
AbhishekSalian/DCGAN
[ "0496b58adf59f79dffaa688b584223172748a4c3" ]
[ "generator.py" ]
[ "import torch\nfrom torch import nn\n\n\nclass Generator(nn.Module):\n\n def __init__(self,\n noise_dim=10,\n img_channels=1,\n hidden_dim=64):\n\n super(Generator, self).__init__()\n self.noise_dim = noise_dim\n\n self.gen = nn.Sequential(\n nn.ConvTranspose2d(noise_dim, hidden_dim*4,\n kernel_size=3, stride=2),\n nn.BatchNorm2d(hidden_dim*4),\n nn.ReLU(),\n\n nn.ConvTranspose2d(hidden_dim*4, hidden_dim*2,\n kernel_size=4, stride=1),\n nn.BatchNorm2d(hidden_dim*2),\n nn.ReLU(),\n\n nn.ConvTranspose2d(hidden_dim*2, hidden_dim,\n kernel_size=3, stride=2),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(),\n\n nn.ConvTranspose2d(hidden_dim, img_channels, kernel_size=4, stride=2),\n nn.Tanh()\n )\n\n def forward(self, noise):\n x = noise.view(len(noise), self.noise_dim, 1, 1)\n return self.gen(x)\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Tanh", "torch.nn.ConvTranspose2d", "torch.nn.BatchNorm2d" ] ]
devangi2000/GAN-Colorizer
[ "79f335dd3d225825f2c2b2873a923797338194cf" ]
[ "dataset.py" ]
[ "# on colab :\r\n\r\n#!pip install fastai --upgrade \r\n# from fastai.data.external import untar_data, URLs\r\n# coco_path = untar_data(URLs.COCO_SAMPLE)\r\n# coco_path = str(coco_path) + \"/train_sample\"\r\n# use_colab = True\r\n\r\nimport os\r\nimport glob\r\nimport time\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom pathlib import Path\r\nfrom tqdm.notebook import tqdm\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.color import rgb2lab, lab2rgb\r\n\r\nimport torch\r\nfrom torch import nn, optim\r\nfrom torchvision import transforms\r\nfrom torchvision.utils import make_grid\r\nfrom torch.utils.data import Dataset, DataLoader\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nuse_colab = None\r\n\r\nif use_colab == True:\r\n path = coco_path\r\nelse:\r\n path = \"Path to the dataset\"\r\n \r\npaths = glob.glob(path + \"/*.jpg\") # Grabbing all the image file names\r\nnp.random.seed(123)\r\npaths_subset = np.random.choice(paths, 10_000, replace=False) # choosing 1000 images randomly\r\nrand_idxs = np.random.permutation(10_000)\r\ntrain_idxs = rand_idxs[:8000] # choosing the first 8000 as training set\r\nval_idxs = rand_idxs[8000:] # choosing last 2000 as validation set\r\ntrain_paths = paths_subset[train_idxs]\r\nval_paths = paths_subset[val_idxs]\r\nprint(len(train_paths), len(val_paths))\r\n\r\n_, axes = plt.subplots(4, 4, figsize=(10, 10))\r\nfor ax, img_path in zip(axes.flatten(), train_paths):\r\n ax.imshow(Image.open(img_path))\r\n ax.axis(\"off\")\r\n\r\nSIZE = 256\r\nclass ColorizationDataset(Dataset):\r\n def __init__(self, paths, split='train'):\r\n if split == 'train':\r\n self.transforms = transforms.Compose([\r\n transforms.Resize((SIZE, SIZE), Image.BICUBIC),\r\n transforms.RandomHorizontalFlip(), # A little data augmentation!\r\n ])\r\n elif split == 'val':\r\n self.transforms = transforms.Resize((SIZE, SIZE), Image.BICUBIC)\r\n \r\n self.split = split\r\n self.size = SIZE\r\n self.paths = paths\r\n \r\n def __getitem__(self, idx):\r\n img = Image.open(self.paths[idx]).convert(\"RGB\")\r\n img = self.transforms(img)\r\n img = np.array(img)\r\n img_lab = rgb2lab(img).astype(\"float32\") # Converting RGB to L*a*b\r\n img_lab = transforms.ToTensor()(img_lab)\r\n L = img_lab[[0], ...] / 50. - 1. # Between -1 and 1\r\n ab = img_lab[[1, 2], ...] / 110. # Between -1 and 1\r\n \r\n return {'L': L, 'ab': ab}\r\n \r\n def __len__(self):\r\n return len(self.paths)\r\n\r\ndef make_dataloaders(batch_size=16, n_workers=4, pin_memory=True, **kwargs): # A handy function to make our dataloaders\r\n dataset = ColorizationDataset(**kwargs)\r\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_workers,\r\n pin_memory=pin_memory)\r\n return dataloader\r\n\r\ntrain_dl = make_dataloaders(paths=train_paths, split='train')\r\nval_dl = make_dataloaders(paths=val_paths, split='val')\r\n\r\ndata = next(iter(train_dl))\r\nLs, abs_ = data['L'], data['ab']\r\nprint(Ls.shape, abs_.shape)\r\nprint(len(train_dl), len(val_dl))" ]
[ [ "numpy.random.seed", "numpy.random.choice", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots", "numpy.random.permutation", "torch.cuda.is_available", "numpy.array" ] ]
BearerPipelineTest/fairo
[ "a5691de7037a2a62832f986dc0e7369bcb8ad6bb" ]
[ "droidlet/memory/craftassist/mc_memory_nodes.py" ]
[ "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nimport os\n\nimport numpy as np\nimport logging\nfrom collections import Counter\nfrom typing import cast, List, Sequence, Dict\nfrom droidlet.base_util import XYZ, POINT_AT_TARGET, IDM, Block, Look\nfrom droidlet.shared_data_struct.craftassist_shared_utils import MOBS_BY_ID\nfrom droidlet.memory.memory_nodes import (\n link_archive_to_mem,\n ReferenceObjectNode,\n MemoryNode,\n NODELIST,\n)\n\n\nclass VoxelObjectNode(ReferenceObjectNode):\n \"\"\"This is a reference object that is distributed over\n multiple voxels and uses VoxelObjects table to hold the\n location of the voxels; and ReferenceObjects to hold 'global' info\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n locs (tuple): List of (x, y, z) tuples\n blocks (dict): Dictionary of (x, y, z) -> (blockid, meta)\n update_times (dict): Dictionary of (x, y, z) -> time this was last updated\n player_placed (dict): Dictionary of (x, y, z) -> was this placed by player ?\n agent_placed (dict): Dictionary of (x, y, z) -> was this placed by the agent ?\n\n Examples::\n >>> node_list = [TaskNode, VoxelObjectNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> VoxelObjectNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n ref = self.agent_memory._db_read(\"SELECT * FROM ReferenceObjects WHERE uuid=?\", self.memid)\n if len(ref) == 0:\n raise Exception(\"no mention of this VoxelObject in ReferenceObjects Table\")\n self.ref_info = ref[0]\n voxels = self.agent_memory._db_read(\"SELECT * FROM VoxelObjects WHERE uuid=?\", self.memid)\n self.locs: List[tuple] = []\n self.blocks: Dict[tuple, tuple] = {}\n self.update_times: Dict[tuple, int] = {}\n self.player_placed: Dict[tuple, bool] = {}\n self.agent_placed: Dict[tuple, bool] = {}\n for v in voxels:\n loc = (v[1], v[2], v[3])\n self.locs.append(loc)\n if v[4]:\n assert v[5] is not None\n self.blocks[loc] = (v[4], v[5])\n else:\n self.blocks[loc] = (None, None)\n self.agent_placed[loc] = v[6]\n self.player_placed[loc] = v[7]\n self.update_times[loc] = v[8]\n # TODO assert these all the same?\n self.memtype = v[9]\n\n def get_pos(self) -> XYZ:\n return cast(XYZ, tuple(int(x) for x in np.mean(self.locs, axis=0)))\n\n def get_point_at_target(self) -> POINT_AT_TARGET:\n point_min = [int(x) for x in np.min(self.locs, axis=0)]\n point_max = [int(x) for x in np.max(self.locs, axis=0)]\n return cast(POINT_AT_TARGET, point_min + point_max)\n\n def get_bounds(self):\n M = np.max(self.locs, axis=0)\n m = np.min(self.locs, axis=0)\n return m[0], M[0], m[1], M[1], m[2], M[2]\n\n def snapshot(self, agent_memory):\n archive_memid = self.new(agent_memory, snapshot=True)\n for loc in self.locs:\n cmd = \"INSERT INTO ArchivedVoxelObjects (uuid, x, y, z, bid, meta, agent_placed, player_placed, updated, ref_type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n values = (\n archive_memid,\n loc[0],\n loc[1],\n loc[2],\n self.blocks[loc][0],\n self.blocks[loc][1],\n self.agent_placed[loc],\n self.player_placed[loc],\n self.update_times[loc],\n self.memtype,\n )\n agent_memory.db_write(cmd, *values)\n\n archive_memid = self.new(agent_memory, snapshot=True)\n cmd = \"INSERT INTO 
ArchivedReferenceObjects (uuid, eid, x, y, z, yaw, pitch, name, type_name, ref_type, player_placed, agent_placed, created, updated, voxel_count) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n info = list(self.ref_info)\n info[0] = archive_memid\n agent_memory.db_write(cmd, *info)\n link_archive_to_mem(agent_memory, self.memid, archive_memid)\n return archive_memid\n\n\nclass BlockObjectNode(VoxelObjectNode):\n \"\"\"This is a voxel object that represents a set of physically present blocks.\n it is considered to be nonephemeral\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Examples::\n >>> node_list = [TaskNode, BlockObjectNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> BlockObjectNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\n \"uuid\",\n \"x\",\n \"y\",\n \"z\",\n \"bid\",\n \"meta\",\n \"agent_placed\",\n \"player_placed\",\n \"updated\",\n ]\n TABLE = \"ReferenceObjects\"\n NODE_TYPE = \"BlockObject\"\n\n @classmethod\n def create(cls, memory, blocks: Sequence[Block]) -> str:\n \"\"\"Creates a new entry into the ReferenceObjects table\n Returns:\n string: memid of the entry\n Examples::\n >>> memory = AgentMemory()\n >>> blocks = [((1, 0, 34), (10, 1)), ((1, 0, 35), (10, 1)),\n ((2, 0, 34), (2, 2)), ((3, 0, 34), (10, 0))]\n >>> create(memory, blocks)\n \"\"\"\n # check if block object already exists in memory\n for xyz, _ in blocks:\n old_memids = memory.get_block_object_ids_by_xyz(xyz)\n if old_memids:\n return old_memids[0]\n memid = cls.new(memory)\n # TODO check/assert this isn't there...\n cmd = \"INSERT INTO ReferenceObjects (uuid, x, y, z, ref_type, voxel_count) VALUES ( ?, ?, ?, ?, ?, ?)\"\n # TODO this is going to cause a bug, need better way to initialize and track mean loc\n memory.db_write(cmd, memid, 0, 0, 0, \"BlockObjects\", 0)\n for block in blocks:\n memory.upsert_block(block, memid, \"BlockObjects\")\n memory.tag(memid, \"_block_object\")\n memory.tag(memid, \"_VOXEL_OBJECT\")\n memory.tag(memid, \"_physical_object\")\n memory.tag(memid, \"_destructible\")\n # this is a hack until memory_filters does \"not\"\n memory.tag(memid, \"_not_location\")\n logging.debug(\n \"Added block object {} with {} blocks, {}\".format(\n memid, len(blocks), Counter([idm for _, idm in blocks])\n )\n )\n\n return memid\n\n def __repr__(self):\n return \"<BlockObject Node @ {}>\".format(list(self.blocks.keys())[0])\n\n\n# note: instance segmentation objects should not be tagged except by the creator\n# build an archive if you want to tag permanently\nclass InstSegNode(VoxelObjectNode):\n \"\"\"This is a voxel object that represents a region of space,\n and is considered ephemeral\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n locs (tuple): List of (x, y, z) tuples for this object\n blocks (dict): Dictionary of (x, y, z) to (blockid, meta)\n tags (list): List of tags for this object\n\n Examples::\n >>> node_list = [TaskNode, InstSegNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> 
InstSegNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\"uuid\", \"x\", \"y\", \"z\", \"ref_type\"]\n TABLE = \"ReferenceObjects\"\n NODE_TYPE = \"InstSeg\"\n\n @classmethod\n def create(cls, memory, locs, tags=[]) -> str:\n \"\"\"Creates a new entry into the VoxelObjects table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> locs = [(1, 0, 34), (1, 0, 35), (2, 0, 34), (3, 0, 34)]\n >>> tags = [\"shiny\", \"bright\"]\n >>> create(memory, locs=locs, tags=tags)\n \"\"\"\n # TODO option to not overwrite\n # check if instance segmentation object already exists in memory\n inst_memids = {}\n for xyz in locs:\n m = memory._db_read(\n 'SELECT uuid from VoxelObjects WHERE ref_type=\"inst_seg\" AND x=? AND y=? AND z=?',\n *xyz\n )\n if len(m) > 0:\n for memid in m:\n inst_memids[memid[0]] = True\n # FIXME just remember the locs in the first pass\n for m in inst_memids.keys():\n olocs = memory._db_read(\"SELECT x, y, z from VoxelObjects WHERE uuid=?\", m)\n # TODO maybe make an archive?\n if len(set(olocs) - set(locs)) == 0:\n memory.forget(m)\n\n memid = cls.new(memory)\n loc = np.mean(locs, axis=0)\n # TODO check/assert this isn't there...\n cmd = \"INSERT INTO ReferenceObjects (uuid, x, y, z, ref_type) VALUES ( ?, ?, ?, ?, ?)\"\n memory.db_write(cmd, memid, loc[0], loc[1], loc[2], \"inst_seg\")\n for loc in locs:\n cmd = \"INSERT INTO VoxelObjects (uuid, x, y, z, ref_type) VALUES ( ?, ?, ?, ?, ?)\"\n memory.db_write(cmd, memid, loc[0], loc[1], loc[2], \"inst_seg\")\n memory.tag(memid, \"_VOXEL_OBJECT\")\n memory.tag(memid, \"_inst_seg\")\n memory.tag(memid, \"_destructible\")\n # this is a hack until memory_filters does \"not\"\n memory.tag(memid, \"_not_location\")\n for tag in tags:\n if type(tag) is str:\n memory.tag(memid, tag)\n elif type(tag) is dict:\n for k, v in tag.items():\n memory.add_triple(subj=memid, pred_text=k, obj_text=v)\n return memid\n\n def __init__(self, memory, memid: str):\n super().__init__(memory, memid)\n r = memory._db_read(\"SELECT x, y, z FROM VoxelObjects WHERE uuid=?\", self.memid)\n self.locs = r\n self.blocks = {l: (0, 0) for l in self.locs}\n tags = memory.get_triples(subj=self.memid, pred_text=\"has_tag\")\n self.tags = [] # noqa: T484\n for tag in tags:\n if tag[2][0] != \"_\":\n self.tags.append(tag[2])\n\n def __repr__(self):\n return \"<InstSeg Node @ {} with tags {} >\".format(self.locs, self.tags)\n\n\nclass MobNode(ReferenceObjectNode):\n \"\"\"This is a memory node representing a mob (moving object) in game\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n eid (int): Entity ID of the mob node\n pos (tuple): (x, y, z) coordinates of the mob\n look (tuple): (yaw, pitch) of the mob\n\n Examples::\n >>> node_list = [TaskNode, MobNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> MobNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\n \"uuid\",\n \"eid\",\n \"x\",\n \"y\",\n \"z\",\n \"yaw\",\n \"pitch\",\n \"ref_type\",\n \"type_name\",\n \"player_placed\",\n \"agent_placed\",\n \"created\",\n ]\n TABLE = \"ReferenceObjects\"\n NODE_TYPE = \"Mob\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n eid, x, y, z, yaw, pitch = 
self.agent_memory._db_read_one(\n \"SELECT eid, x, y, z, yaw, pitch FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n self.eid = eid\n self.pos = (x, y, z)\n self.look = (yaw, pitch)\n\n @classmethod\n def create(cls, memory, mob, player_placed=False, agent_placed=False) -> str:\n \"\"\"Creates a new entry into the ReferenceObjects table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> from droidlet.shared_data_struct.craftassist_shared_utils import MOBS_BY_ID\n >>> memory = AgentMemory()\n >>> chicken = {v: k for k, v in MOBS_BY_ID.items()}[\"chicken\"]\n >>> mob_id, mob_type, pos, look = 42, chicken, Pos(3, 4, 5), Look(0.0, 0.0)\n >>> mob = Mob(mob_id, mob_type, pos, look) # get an instance of the Mob class\n >>> player_placed=True # spawned by player\n >>> create(memory, mob, player_placed=player_placed)\n \"\"\"\n # TODO warn/error if mob already in memory?\n memid = cls.new(memory)\n mobtype = MOBS_BY_ID[mob.mobType]\n memory.db_write(\n \"INSERT INTO ReferenceObjects(uuid, eid, x, y, z, yaw, pitch, ref_type, type_name, player_placed, agent_placed, created) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n memid,\n mob.entityId,\n mob.pos.x,\n mob.pos.y,\n mob.pos.z,\n mob.look.yaw,\n mob.look.pitch,\n \"mob\",\n mobtype,\n player_placed,\n agent_placed,\n memory.get_time(),\n )\n memory.tag(memid, \"mob\")\n memory.tag(memid, \"_physical_object\")\n memory.tag(memid, \"_animate\")\n # this is a hack until memory_filters does \"not\"\n memory.tag(memid, \"_not_location\")\n memory.tag(memid, mobtype)\n memory.add_triple(subj=memid, pred_text=\"has_name\", obj_text=mobtype)\n return memid\n\n def get_pos(self) -> XYZ:\n x, y, z = self.agent_memory._db_read_one(\n \"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n self.pos = (x, y, z)\n return self.pos\n\n def get_look(self) -> Look:\n yaw, pitch = self.agent_memory._db_read_one(\n \"SELECT yaw, pitch FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n self.look = (yaw, pitch)\n return self.look\n\n # TODO: use a smarter way to get point_at_target\n def get_point_at_target(self) -> POINT_AT_TARGET:\n x, y, z = self.agent_memory._db_read_one(\n \"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n # use the block above the mob as point_at_target\n return cast(POINT_AT_TARGET, (x, y + 1, z, x, y + 1, z))\n\n def get_bounds(self):\n x, y, z = self.pos\n return x, x, y, y, z, z\n\n\nclass ItemStackNode(ReferenceObjectNode):\n \"\"\"A memory node for an item stack, which is something on the ground,\n this is different from the placed blocks and can be picked up by the player/agent\n if they are close to it.\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n memid (string): MemoryID of the node\n eid (int): Entity ID of the item\n pos(tuple): (x, y, z) coordinates of the item\n\n Examples::\n >>> node_list = [TaskNode, ItemStackNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> ItemStackNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_ROWS = [\"uuid\", \"eid\", \"x\", \"y\", \"z\", \"type_name\", \"ref_type\", \"created\"]\n TABLE = \"ReferenceObjects\"\n NODE_TYPE = \"ItemStack\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n eid, x, y, z 
= self.agent_memory._db_read_one(\n \"SELECT eid, x, y, z FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n self.memid = memid\n self.eid = eid\n self.pos = (x, y, z)\n\n @classmethod\n def create(cls, memory, item_stack, block_data_info) -> str:\n \"\"\"Creates a new entry into the ReferenceObjects table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> from collections import namedtuple\n >>> ItemStack = namedtuple(\"ItemStack\", \"entityId, pos\")\n >>> item_stack = ItemStack(12345678, Pos(0.0, 0.0, 0.0))\n >>> create(memory, item_stack)\n \"\"\"\n bid_to_name = block_data_info.get(\"bid_to_name\", {})\n type_name = bid_to_name[(item_stack.item.id, item_stack.item.meta)]\n memid = cls.new(memory)\n memory.db_write(\n \"INSERT INTO ReferenceObjects(uuid, eid, x, y, z, type_name, ref_type, created) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\",\n memid,\n item_stack.entityId,\n item_stack.pos.x,\n item_stack.pos.y,\n item_stack.pos.z,\n type_name,\n \"item_stack\",\n memory.get_time(),\n )\n memory.tag(memid, type_name)\n memory.tag(memid, \"_item_stack\")\n memory.tag(memid, \"_on_ground\")\n memory.tag(memid, \"_physical_object\")\n # this is a hack until memory_filters does \"not\"\n memory.tag(memid, \"_not_location\")\n return memid\n\n def get_pos(self) -> XYZ:\n x, y, z = self.agent_memory._db_read_one(\n \"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n self.pos = (x, y, z)\n return self.pos\n\n # TODO: use a smarter way to get point_at_target\n def get_point_at_target(self) -> POINT_AT_TARGET:\n x, y, z = self.agent_memory._db_read_one(\n \"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?\", self.memid\n )\n # use the block above the item stack as point_at_target\n return cast(POINT_AT_TARGET, (x, y + 1, z, x, y + 1, z))\n\n def get_bounds(self):\n x, y, z = self.pos\n return x, x, y, y, z, z\n\n\nclass SchematicNode(MemoryNode):\n \"\"\"A memory node representing a plan for an object that could\n be built in the environment.\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n blocks (dict): Mapping of each (x, y, z) coordinate to the (block_id, meta) of\n the block at that coordinate.\n\n Examples::\n >>> node_list = [TaskNode, SchematicNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> SchematicNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\"uuid\", \"x\", \"y\", \"z\", \"bid\", \"meta\"]\n TABLE = \"Schematics\"\n NODE_TYPE = \"Schematic\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n if memid in agent_memory.schematics.keys():\n self.blocks = {\n (x, y, z): (b, m) for ((x, y, z), (b, m)) in agent_memory.schematics[memid]\n }\n else:\n r = self.agent_memory._db_read(\n \"SELECT x, y, z, bid, meta FROM Schematics WHERE uuid=?\", self.memid\n )\n self.blocks = {(x, y, z): (b, m) for (x, y, z, b, m) in r}\n\n @classmethod\n def create(cls, memory, blocks: Sequence[Block]) -> str:\n \"\"\"Creates a new entry into the Schematics table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> blocks = [((0, 0, 1), (1, 0)), ((0, 0, 2), (1, 0)),\n ((0, 0, 3), (2, 0))]\n >>> create(memory, blocks)\n \"\"\"\n memid = cls.new(memory)\n for 
((x, y, z), (b, m)) in blocks:\n memory.db_write(\n \"\"\"\n INSERT INTO Schematics(uuid, x, y, z, bid, meta)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\",\n memid,\n x,\n y,\n z,\n b,\n m,\n )\n return memid\n\n\nclass BlockTypeNode(MemoryNode):\n \"\"\"This is a memory node representing the type of a block in Minecraft\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n type_name (string): Name of the type of block (example: wool)\n b (int): The id of the block\n m (int): The meta information of a block\n\n Examples::\n >>> node_list = [TaskNode, BlockTypeNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> BlockTypeNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\"uuid\", \"type_name\", \"bid\", \"meta\"]\n TABLE = \"BlockTypes\"\n NODE_TYPE = \"BlockType\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n type_name, b, m = self.agent_memory._db_read(\n \"SELECT type_name, bid, meta FROM BlockTypes WHERE uuid=?\", self.memid\n )[0]\n self.type_name = type_name\n self.b = b\n self.m = m\n\n @classmethod\n def create(cls, memory, type_name: str, idm: IDM) -> str:\n \"\"\"Creates a new entry into the BlockTypes table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> type_name = \"air_block\"\n >>> idm = (0, 0)\n >>> create(memory, type_name, idm)\n \"\"\"\n memid = cls.new(memory)\n memory.db_write(\n \"INSERT INTO BlockTypes(uuid, type_name, bid, meta) VALUES (?, ?, ?, ?)\",\n memid,\n type_name,\n idm[0],\n idm[1],\n )\n return memid\n\n\nclass MobTypeNode(MemoryNode):\n \"\"\"This represents a mob type memory node (the type of a mob,\n example: animal)\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n type_name (string): Name of the mob type\n b (int): Id of the mob type\n m (int): Meta information of the mob type\n\n Examples::\n >>> node_list = [TaskNode, MobTypeNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> MobTypeNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\"uuid\", \"type_name\", \"bid\", \"meta\"]\n TABLE = \"MobTypes\"\n NODE_TYPE = \"MobType\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n type_name, b, m = self.agent_memory._db_read(\n \"SELECT type_name, bid, meta FROM MobTypes WHERE uuid=?\", self.memid\n )\n self.type_name = type_name\n self.b = b\n self.m = m\n\n @classmethod\n def create(cls, memory, type_name: str, idm: IDM) -> str:\n \"\"\"Creates a new entry into the MobTypes table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> type_name = \"spawn husk\"\n >>> idm = (23, 0)\n >>> create(memory, type_name, idm)\n \"\"\"\n memid = cls.new(memory)\n memory.db_write(\n \"INSERT INTO MobTypes(uuid, type_name, bid, meta) VALUES (?, ?, ?, ?)\",\n memid,\n type_name,\n idm[0],\n idm[1],\n )\n return memid\n\n\nclass DanceNode(MemoryNode):\n \"\"\"This is a memory node representing a dance 
or sequence of movement steps\n\n Args:\n agent_memory (AgentMemory): An AgentMemory object\n memid (string): Memory ID for this node\n\n Attributes:\n dance_fn (function): The function representing the execution of the dance\n\n Examples::\n >>> node_list = [TaskNode, DanceNode]\n >>> schema_path = [os.path.join(os.path.dirname(__file__), \"memory_schema.sql\")]\n >>> agent_memory = AgentMemory(db_file=\":memory:\",\n schema_paths=schema_path,\n db_log_path=None,\n nodelist=node_list)\n >>> memid = '10517cc584844659907ccfa6161e9d32'\n >>> DanceNode(agent_memory=agent_memory, memid=memid)\n \"\"\"\n\n TABLE_COLUMNS = [\"uuid\"]\n TABLE = \"Dances\"\n NODE_TYPE = \"Dance\"\n\n def __init__(self, agent_memory, memid: str):\n super().__init__(agent_memory, memid)\n # TODO put in DB/pickle like tasks?\n self.dance_fn = self.agent_memory.dances[memid]\n\n @classmethod\n def create(cls, memory, dance_fn, name=None, tags=[]) -> str:\n \"\"\"Creates a new entry into the Dances table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> from dance import *\n >>> konami_dance = [\n {\"translate\": (0, 1, 0)},\n {\"translate\": (0, 1, 0)},\n {\"translate\": (0, -1, 0)},\n {\"translate\": (0, -1, 0)},\n {\"translate\": (0, 0, -1)},\n {\"translate\": (0, 0, 1)},\n {\"translate\": (0, 0, -1)},\n {\"translate\": (0, 0, 1)},\n ]\n >>> dance_fn = generate_sequential_move_fn(konami_dance)\n >>> name = \"konami_dance\"\n >>> tags = [\"dance\", \"konami\"]\n >>> create(memory, dance_fn, name=name, tags=tags)\n \"\"\"\n memid = cls.new(memory)\n memory.db_write(\"INSERT INTO Dances(uuid) VALUES (?)\", memid)\n # TODO put in db via pickle like tasks?\n memory.dances[memid] = dance_fn\n if name is not None:\n memory.add_triple(subj=memid, pred_text=\"has_name\", obj_text=name)\n if len(tags) > 0:\n for tag in tags:\n memory.add_triple(subj=memid, pred_text=\"has_tag\", obj_text=tag)\n return memid\n\n\nclass RewardNode(MemoryNode):\n \"\"\"This is a memory node for a reward (positive or negative)\n to the agent\"\"\"\n\n TABLE_COLUMNS = [\"uuid\", \"value\", \"time\"]\n TABLE = \"Rewards\"\n NODE_TYPE = \"Reward\"\n\n def __init__(self, agent_memory, memid: str):\n # initialize the base node so self.memid/self.agent_memory are set,\n # consistent with the other MemoryNode subclasses above\n super().__init__(agent_memory, memid)\n _, value, timestamp = agent_memory._db_read_one(\n \"SELECT * FROM Rewards WHERE uuid=?\", memid\n )\n self.value = value\n self.time = timestamp\n\n @classmethod\n def create(cls, agent_memory, reward_value: str) -> str:\n \"\"\"Creates a new entry into the Rewards table\n\n Returns:\n string: memid of the entry\n\n Examples::\n >>> memory = AgentMemory()\n >>> reward_value = \"positive\"\n >>> create(memory, reward_value)\n \"\"\"\n memid = cls.new(agent_memory)\n agent_memory.db_write(\n \"INSERT INTO Rewards(uuid, value, time) VALUES (?,?,?)\",\n memid,\n reward_value,\n agent_memory.get_time(),\n )\n return memid\n\n\nNODELIST = NODELIST + [\n RewardNode,\n DanceNode,\n BlockTypeNode,\n SchematicNode,\n MobNode,\n ItemStackNode,\n MobTypeNode,\n InstSegNode,\n BlockObjectNode,\n] # noqa\n" ]
[ [ "numpy.max", "numpy.mean", "numpy.min" ] ]
frankdarkluo/SOLS
[ "cca756d48da5d34f03b09cd1e8e55f65adfaf134" ]
[ "DialogRE/SOLS/evaluate.py" ]
[ "import json\nimport numpy as np\nimport argparse\n\ndef softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n\ndef getresult(fn):\n result = []\n with open(fn, \"r\") as f:\n l = f.readline()\n while l:\n l = l.strip().split()\n for i in range(len(l)):\n l[i] = float(l[i])\n result += [l]\n l = f.readline()\n result = np.asarray(result)\n return list(1 / (1 + np.exp(-result)))\n\ndef getpredict(result, T1 = 0.5, T2 = 0.4):\n for i in range(len(result)):\n r = []\n maxl, maxj = -1, -1\n for j in range(len(result[i])):\n if result[i][j] > T1:\n r += [j]\n if result[i][j] > maxl:\n maxl = result[i][j]\n maxj = j\n if len(r) == 0:\n if maxl <= T2:\n r = [36]\n else:\n r += [maxj]\n result[i] = r\n return result\n\ndef evaluate(devp, data):\n index = 0\n correct_sys, all_sys = 0, 0\n correct_gt = 0\n \n for i in range(len(data)):\n for j in range(len(data[i][1])):\n for id in data[i][1][j][\"rid\"]:\n if id != 36:\n correct_gt += 1\n if id in devp[index]:\n correct_sys += 1\n for id in devp[index]:\n if id != 36:\n all_sys += 1\n index += 1\n\n precision = correct_sys/all_sys if all_sys != 0 else 1\n recall = correct_sys/correct_gt if correct_gt != 0 else 0\n f_1 = 2*precision*recall/(precision+recall) if precision+recall != 0 else 0\n\n return precision, recall, f_1\n\n\ndef evaluate_f1c(devp, data):\n index = 0\n precisions = []\n recalls = []\n \n for i in range(len(data)):\n for j in range(len(data[i][1])):\n correct_sys, all_sys = 0, 0\n correct_gt = 0\n \n x = data[i][1][j][\"x\"].lower().strip()\n y = data[i][1][j][\"y\"].lower().strip()\n t = {}\n for k in range(len(data[i][1][j][\"rid\"])):\n if data[i][1][j][\"rid\"][k] != 36:\n t[data[i][1][j][\"rid\"][k]] = data[i][1][j][\"t\"][k].lower().strip()\n\n l = set(data[i][1][j][\"rid\"]) - set([36])\n\n ex, ey = False, False\n et = {}\n for r in range(36):\n et[r] = r not in l\n\n for k in range(len(data[i][0])):\n o = set(devp[index]) - set([36])\n e = set()\n if x in data[i][0][k].lower():\n ex = True\n if y in data[i][0][k].lower():\n ey = True\n if k == len(data[i][0])-1:\n ex = ey = True\n for r in range(36):\n et[r] = True\n for r in range(36):\n if r in t:\n if t[r] != \"\" and t[r] in data[i][0][k].lower():\n et[r] = True\n if ex and ey and et[r]:\n e.add(r)\n correct_sys += len(o & l & e)\n all_sys += len(o & e)\n correct_gt += len(l & e)\n index += 1\n\n precisions += [correct_sys/all_sys if all_sys != 0 else 1]\n recalls += [correct_sys/correct_gt if correct_gt != 0 else 0]\n\n precision = sum(precisions) / len(precisions)\n recall = sum(recalls) / len(recalls)\n f_1 = 2*precision*recall/(precision+recall) if precision+recall != 0 else 0\n\n return precision, recall, f_1\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--f1dev\",\n default=None,\n type=str,\n required=True,\n help=\"Dev logits (f1).\")\n parser.add_argument(\"--f1test\",\n default=None,\n type=str,\n required=True,\n help=\"Test logits (f1).\")\n parser.add_argument(\"--f1cdev\",\n default=None,\n type=str,\n required=True,\n help=\"Dev logits (f1c).\")\n parser.add_argument(\"--f1ctest\",\n default=None,\n type=str,\n required=True,\n help=\"Test logits (f1c).\")\n \n args = parser.parse_args()\n \n f1dev = args.f1dev\n f1test = args.f1test\n f1cdev = args.f1cdev\n f1ctest = args.f1ctest\n\n with open(\"datacn/dev.json\", \"r\", encoding='utf8') as f:\n datadev = json.load(f)\n with open(\"datacn/test.json\", \"r\", encoding='utf8') as f:\n datatest = json.load(f)\n for i in range(len(datadev)):\n for j in 
range(len(datadev[i][1])):\n for k in range(len(datadev[i][1][j][\"rid\"])):\n datadev[i][1][j][\"rid\"][k] -= 1\n for i in range(len(datatest)):\n for j in range(len(datatest[i][1])):\n for k in range(len(datatest[i][1][j][\"rid\"])):\n datatest[i][1][j][\"rid\"][k] -= 1\n\n bestT2 = bestf_1 = 0\n for T2 in range(51):\n dev = getresult(f1dev)\n devp = getpredict(dev, T2=T2/100.)\n precision, recall, f_1 = evaluate(devp, datadev)\n if f_1 > bestf_1:\n bestf_1 = f_1\n bestT2 = T2/100.\n\n print(\"best T2:\", bestT2)\n\n dev = getresult(f1dev)\n devp = getpredict(dev, T2=bestT2)\n precision, recall, f_1 = evaluate(devp, datadev)\n print(\"dev (P R F1)\", precision, recall, f_1)\n\n test = getresult(f1test)\n testp = getpredict(test, T2=bestT2)\n precision, recall, f_1 = evaluate(testp, datatest)\n print(\"test (P R F1)\", precision, recall, f_1)\n\n dev = getresult(f1cdev)\n devp = getpredict(dev, T2=bestT2)\n precision, recall, f_1c = evaluate_f1c(devp, datadev)\n print (\"dev (P_c R_c F1_c)\", precision, recall, f_1c)\n\n test = getresult(f1ctest)\n testp = getpredict(test, T2=bestT2)\n precision, recall, f_1c = evaluate_f1c(testp, datatest)\n print (\"test (P_c R_c F1_c)\", precision, recall, f_1c)\n" ]
[ [ "numpy.asarray", "numpy.exp" ] ]
adverML/adversarial-detection
[ "0173b19a7352a2ec769f24a89d4e2cf8f4423514" ]
[ "expts/generate_samples_custom.py" ]
[ "\"\"\"\nMain script for generating adversarial data (from custom KNN attack) from the cross-validation folds and saving\nthem to numpy files.\n\nExample usage:\npython generate_samples_custom.py -m mnist max-num-adver 1000 --gpu 3 --defense-method proposed --dist-metric cosine --n-jobs 16\npython generate_samples_custom.py -m cifar10 --max-num-adver 100 --gpu 0 --defense-method proposed --dist-metric cosine --n-jobs 16 --nf 2\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport sys\nimport time\nimport argparse\nimport os\nimport pickle\nimport numpy as np\nimport torch\nfrom torchvision import datasets, transforms\nfrom sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit\nfrom nets.mnist import *\nfrom nets.cifar10 import *\nfrom nets.svhn import *\nfrom nets.resnet import *\nfrom helpers.constants import (\n ROOT,\n SEED_DEFAULT,\n CROSS_VAL_SIZE,\n NORMALIZE_IMAGES,\n BATCH_SIZE_DEF,\n NEIGHBORHOOD_CONST,\n CUSTOM_ATTACK,\n MAX_NUM_REPS\n)\nfrom helpers.utils import (\n load_model_checkpoint,\n convert_to_loader,\n load_numpy_data,\n get_data_bounds,\n verify_data_loader,\n get_samples_as_ndarray,\n get_predicted_classes\n)\nfrom helpers import knn_attack\nfrom helpers.utils import extract_layer_embeddings as extract_layer_embeddings_numpy\n\n# Declaring global model objects to avoid copies in memory\nmodels_detec_propo = None\nmodels_detec_dknn = None\n\n\ndef helper_accuracy(layer_embeddings, labels_pred_dnn, labels, ind_fold):\n global models_detec_propo, models_detec_dknn\n\n # Accuracy of the DNN classifier\n n_test = labels.shape[0]\n mask = labels_pred_dnn == labels\n accu_dnn = (100. * mask[mask].shape[0]) / n_test\n # Accuracy of the proposed method\n is_error, _ = knn_attack.check_adv_detec(layer_embeddings, labels, labels_pred_dnn, models_detec_propo[ind_fold],\n is_numpy=True)\n accu_propo = (100. * (n_test - is_error[is_error].shape[0])) / n_test\n # Accuracy of deep KNN\n is_error, _ = knn_attack.check_adv_detec(layer_embeddings, labels, labels_pred_dnn, models_detec_dknn[ind_fold],\n is_numpy=True)\n accu_dknn = (100. 
* (n_test - is_error[is_error].shape[0])) / n_test\n\n return accu_dnn, accu_propo, accu_dknn\n\n\ndef combine_and_save(save_path, data_adver, labels_adver, data_clean, labels_clean, norm_perturb, is_correct,\n is_adver, labels_te):\n # Concatenate list of arrays into a single array\n data_adver = np.concatenate(data_adver, axis=0)\n labels_adver = np.asarray(np.concatenate(labels_adver), dtype=labels_te.dtype)\n data_clean = np.concatenate(data_clean, axis=0)\n labels_clean = np.asarray(np.concatenate(labels_clean), dtype=labels_te.dtype)\n norm_perturb = np.concatenate(norm_perturb)\n is_correct = np.concatenate(is_correct)\n is_adver = np.concatenate(is_adver)\n # save data to numpy files\n np.save(os.path.join(save_path, 'data_te_adv.npy'), data_adver)\n np.save(os.path.join(save_path, 'labels_te_adv.npy'), labels_adver)\n np.save(os.path.join(save_path, 'data_te_clean.npy'), data_clean)\n np.save(os.path.join(save_path, 'labels_te_clean.npy'), labels_clean)\n np.save(os.path.join(save_path, 'norm_perturb.npy'), norm_perturb)\n # np.save(os.path.join(save_path, 'is_correct_detec.npy'), is_correct)\n np.save(os.path.join(save_path, 'is_adver.npy'), is_adver)\n\n return data_adver, labels_adver, data_clean, labels_clean, norm_perturb, is_correct, is_adver\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch-size', type=int, default=64, help='batch size of evaluation')\n parser.add_argument('--max-num-adver', type=int, default=-1,\n help='Maximum number of adversarial samples to generate. If set to the default of -1, it '\n 'attempts to generate adversarial samples for every test fold sample.')\n parser.add_argument('--model-type', '-m', choices=['mnist', 'cifar10', 'svhn'], default='cifar10',\n help='model type or name of the dataset')\n parser.add_argument('--output-dir', '-o', default='', help='directory path for saving the output and model files')\n parser.add_argument('--seed', '-s', type=int, default=SEED_DEFAULT, help='seed for random number generation')\n parser.add_argument('--generate-attacks', type=bool, default=True,\n help='whether attack samples should be generated (default: True)')\n parser.add_argument('--gpu', type=str, default=\"3\", help='which gpus to execute code on')\n parser.add_argument('--defense-method', '--dm', choices=['dknn', 'proposed', 'dnn'], default='proposed',\n help=\"Defense method to attack. Choices are 'dnn', 'dknn' and 'proposed'\")\n parser.add_argument('--det-model-file', '--dmf', default='',\n help='Path to the saved detector model file. Loads from a default location if not specified.')\n parser.add_argument('--dist-metric', choices=['euclidean', 'cosine'], default='euclidean',\n help='distance metric to use')\n parser.add_argument('--n-jobs', type=int, default=16, help='number of parallel jobs to use for multiprocessing')\n parser.add_argument('--untargeted', action='store_true', default=False,\n help='Use this option to create untargeted adversarial samples from this attack')\n parser.add_argument('--skip-save-batches', action='store_true', default=False,\n help='Use this option to skip saving the intermediate data batches to numpy files. 
'\n 'This will shave off some time and avoid frequent I/O')\n parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training')\n parser.add_argument('--test-batch-size', '--tb', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--num-folds', '--nf', type=int, default=CROSS_VAL_SIZE,\n help='number of cross-validation folds')\n '''\n parser.add_argument('--stepsize', type=float, default=0.001, help='stepsize')\n parser.add_argument('--max-iterations', type=int, default=1000, help='max num. of iterations')\n '''\n args = parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n # Output directory\n if not args.output_dir:\n output_dir = os.path.join(ROOT, 'numpy_data', args.model_type)\n else:\n output_dir = args.output_dir\n\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n num_folds = args.num_folds\n data_path = os.path.join(ROOT, 'data')\n if args.model_type == 'mnist':\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(*NORMALIZE_IMAGES['mnist'])]\n )\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(data_path, train=False, download=True, transform=transform),\n batch_size=args.test_batch_size, shuffle=False, **kwargs\n )\n model = MNIST().to(device)\n model = load_model_checkpoint(model, args.model_type)\n num_classes = 10\n\n elif args.model_type == 'cifar10':\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(*NORMALIZE_IMAGES['cifar10'])]\n )\n testset = datasets.CIFAR10(root=data_path, train=False, download=True, transform=transform)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)\n num_classes = 10\n model = ResNet34().to(device)\n model = load_model_checkpoint(model, args.model_type)\n\n elif args.model_type == 'svhn':\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(*NORMALIZE_IMAGES['svhn'])]\n )\n testset = datasets.SVHN(root=data_path, split='test', download=True, transform=transform)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)\n num_classes = 10\n model = SVHN().to(device)\n model = load_model_checkpoint(model, args.model_type)\n\n else:\n raise ValueError(\"'{}' is not a valid model type\".format(args.model_type))\n\n # Set model to evaluation mode\n model.eval()\n\n # convert the test data loader to 2 ndarrays\n data, labels = get_samples_as_ndarray(test_loader)\n\n # Get the range of values in the data array\n bounds = get_data_bounds(data)\n print(\"Range of data values: ({:.4f}, {:.4f})\\n\".format(*bounds))\n\n # verify if the data loader is the same as the ndarrays it generates\n if not verify_data_loader(test_loader, batch_size=args.test_batch_size):\n raise ValueError(\"Data loader verification failed\")\n\n # Path to the detection model file\n det_model_file = ''\n if args.det_model_file:\n det_model_file = args.det_model_file\n else:\n if args.defense_method != 'dnn':\n # default path to the saved detection model file\n det_model_file = os.path.join(ROOT, 'outputs', args.model_type, 'detection', CUSTOM_ATTACK,\n 'models_{}.pkl'.format(args.defense_method))\n\n print(\"Defense method: 
{}\".format(args.defense_method))\n if det_model_file:\n print(\"Loading saved detection models from the file: {}\".format(det_model_file))\n # Load the detection models (from each cross-validation fold) from a pickle file.\n # `models_detec` will be a list of trained detection models from each fold\n with open(det_model_file, 'rb') as fp:\n models_detec = pickle.load(fp)\n else:\n models_detec = [None] * num_folds\n\n global models_detec_propo, models_detec_dknn\n # Detection models for the dknn method. Used for comparison\n fname = os.path.join(ROOT, 'outputs', args.model_type, 'detection', CUSTOM_ATTACK, 'models_dknn.pkl')\n with open(fname, 'rb') as fp:\n models_detec_dknn = pickle.load(fp)\n\n # Detection models for the proposed method. Used for comparison\n fname = os.path.join(ROOT, 'outputs', args.model_type, 'detection', CUSTOM_ATTACK, 'models_proposed.pkl')\n with open(fname, 'rb') as fp:\n models_detec_propo = pickle.load(fp)\n\n if args.max_num_adver > 0:\n max_num_adver = args.max_num_adver // num_folds\n else:\n max_num_adver = args.max_num_adver\n\n # repeat for each fold in the cross-validation split\n skf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=args.seed)\n i = 1\n for ind_tr, ind_te in skf.split(data, labels):\n t_init = time.time()\n data_tr = data[ind_tr, :]\n labels_tr = labels[ind_tr]\n data_te = data[ind_te, :]\n labels_te = labels[ind_te]\n\n # Set number of nearest neighbors based on the data size and the neighborhood constant\n n_neighbors = int(np.ceil(labels_tr.shape[0] ** NEIGHBORHOOD_CONST))\n print(\"\\nProcessing fold {:d}\".format(i))\n print(\"Number of nearest neighbors = {:d}\".format(n_neighbors))\n \n # make dir based on fold to save data\n numpy_save_path = os.path.join(output_dir, \"fold_\" + str(i))\n if not os.path.isdir(numpy_save_path):\n os.makedirs(numpy_save_path)\n\n # save train fold to numpy_save_path or load if it exists already\n if not os.path.isfile(os.path.join(numpy_save_path, 'data_tr.npy')):\n np.save(os.path.join(numpy_save_path, 'data_tr.npy'), data_tr)\n else:\n data_tr = np.load(os.path.join(numpy_save_path, \"data_tr.npy\"))\n\n if not os.path.isfile(os.path.join(numpy_save_path, 'labels_tr.npy')):\n np.save(os.path.join(numpy_save_path, 'labels_tr.npy'), labels_tr)\n else:\n labels_tr = np.load(os.path.join(numpy_save_path, \"labels_tr.npy\"))\n \n # save test fold to numpy_save_path or load if it exists already\n if not os.path.isfile(os.path.join(numpy_save_path, 'data_te.npy')):\n np.save(os.path.join(numpy_save_path, 'data_te.npy'), data_te)\n else:\n data_te = np.load(os.path.join(numpy_save_path, \"data_te.npy\"))\n\n if not os.path.isfile(os.path.join(numpy_save_path, 'labels_te.npy')):\n np.save(os.path.join(numpy_save_path, 'labels_te.npy'), labels_te)\n else:\n labels_te = np.load(os.path.join(numpy_save_path, \"labels_te.npy\"))\n\n if args.generate_attacks:\n # print(data_tr.shape, labels_tr.shape)\n adv_save_path = os.path.join(output_dir, 'fold_{}'.format(i), CUSTOM_ATTACK)\n if not os.path.isdir(adv_save_path):\n os.makedirs(adv_save_path)\n\n n_test = labels_te.shape[0]\n n_train = labels_tr.shape[0]\n if n_train > MAX_NUM_REPS:\n # Select a random, class-stratified sample from the training data of size `MAX_NUM_REPS`.\n # This is done to speed-up the attack optimization\n sss = StratifiedShuffleSplit(n_splits=1, test_size=MAX_NUM_REPS, random_state=args.seed)\n _, ind_sample = next(sss.split(data_tr, labels_tr))\n data_tr_sample = data_tr[ind_sample, :]\n labels_tr_sample = 
labels_tr[ind_sample]\n print(\"\\nRandomly sampling the train split from {:d} to {:d} samples\".format(n_train, MAX_NUM_REPS))\n else:\n data_tr_sample = data_tr\n labels_tr_sample = labels_tr\n\n # Data loader for the train and test split\n train_fold_loader = convert_to_loader(data_tr_sample, labels_tr_sample, batch_size=args.batch_size,\n custom=False)\n test_fold_loader = convert_to_loader(data_te, labels_te, batch_size=args.batch_size, custom=False)\n # Extract the layer embeddings for samples from the train and test split\n layer_embeddings_train, _, _, _ = extract_layer_embeddings_numpy(model, device, train_fold_loader,\n method='proposed')\n layer_embeddings_test, _, labels_pred_dnn_test, _ = extract_layer_embeddings_numpy(\n model, device, test_fold_loader, method='proposed'\n )\n # Calculate accuracy of the DNN and the detection methods on clean data\n accu_clean_dnn, accu_clean_propo, accu_clean_dknn = helper_accuracy(\n layer_embeddings_test, labels_pred_dnn_test, labels_te, i - 1\n )\n print(\"Accuracy on clean data:\\nDNN classifier: {:.4f}, proposed: {:.4f}, dknn: {:.4f}\".\n format(accu_clean_dnn, accu_clean_propo, accu_clean_dknn))\n\n # Load kernel sigma values from file if available\n sigma_filename = os.path.join(adv_save_path, 'kernel_sigma_{}.npy'.format(args.dist_metric))\n if os.path.isfile(sigma_filename):\n sigma_per_layer = np.load(sigma_filename)\n else:\n # Search for suitable kernel scale per layer.\n # `sigma_per_layer` should be a numpy array of size `(data_te.shape[0], n_layers)`\n print(\"Setting the kernel scale values for the test fold data.\")\n sigma_per_layer = knn_attack.set_kernel_scale(\n layer_embeddings_train, layer_embeddings_test,\n metric=args.dist_metric, n_neighbors=n_neighbors, n_jobs=args.n_jobs\n )\n np.save(sigma_filename, sigma_per_layer)\n\n del test_fold_loader, layer_embeddings_train, layer_embeddings_test\n # numpy array to torch tensor\n sigma_per_layer = torch.from_numpy(sigma_per_layer).to(device)\n # Index of samples from each class in `labels_tr_sample`\n labels_uniq = np.unique(labels_tr_sample)\n indices_per_class = {c: np.where(labels_tr_sample == c)[0] for c in labels_uniq}\n\n # `layer_embeddings_per_class_train` contains the layer wise embeddings corresponding to each class\n # from the `train_fold_loader`. It is a dict mapping each class to a list of torch tensors per layer\n layer_embeddings_per_class_train = knn_attack.extract_layer_embeddings(\n model, device, train_fold_loader, indices_per_class, split_by_class=True\n )\n if max_num_adver > 0:\n max_num_adver_fold = min(max_num_adver, n_test)\n else:\n max_num_adver_fold = n_test\n\n print(\"Creating adversarial samples from the test fold. 
Maximum number of adversarial samples: {:d}\".\n format(max_num_adver_fold))\n # Recreating the test fold loader with `custom = True` in order to get the sample indices.\n test_fold_loader = convert_to_loader(data_te, labels_te, batch_size=args.batch_size,\n custom=True, shuffle=True)\n data_adver = []\n labels_adver = []\n data_clean = []\n labels_clean = []\n norm_perturb = []\n is_correct = []\n is_adver = []\n n_batches = len(test_fold_loader)\n n_adver_curr = 0\n for batch_idx, (data_temp, labels_temp, index_temp) in enumerate(test_fold_loader, start=1):\n print(\"Batch {:d}/{:d}\".format(batch_idx, n_batches))\n index_temp = index_temp.cpu().numpy()\n # data_batch_excl = np.delete(data_te, index_temp, axis=0)\n # labels_batch_excl = np.delete(labels_te, index_temp, axis=0)\n # main attack function\n labels_pred_temp = labels_pred_dnn_test[index_temp]\n data_adver_batch, labels_adver_batch, norm_perturb_batch, is_correct_batch, is_adver_batch = \\\n knn_attack.attack(\n model, device, data_temp.to(device), labels_temp, labels_pred_temp,\n layer_embeddings_per_class_train, labels_uniq, sigma_per_layer[index_temp, :],\n model_detector=models_detec[i - 1], untargeted=args.untargeted,\n dist_metric=args.dist_metric, fast_mode=True, verbose=True\n )\n # all returned outputs are numpy arrays\n # accumulate results from this batch\n data_adver.append(data_adver_batch)\n labels_adver.append(labels_adver_batch)\n data_clean.append(data_temp.detach().cpu().numpy())\n labels_clean.append(labels_temp.detach().cpu().numpy())\n norm_perturb.append(norm_perturb_batch)\n is_correct.append(is_correct_batch)\n is_adver.append(is_adver_batch)\n if not args.skip_save_batches:\n # combine data from the batches so far and save them to numpy files\n _ = combine_and_save(adv_save_path, data_adver, labels_adver, data_clean, labels_clean,\n norm_perturb, is_correct, is_adver, labels_te)\n print(\"Saved data up to batch {:d}\".format(batch_idx))\n\n n_adver_curr += is_adver_batch[is_adver_batch].shape[0]\n if n_adver_curr >= max_num_adver_fold:\n print(\"Found {:d} adversarial samples from {:d} data batches\".format(n_adver_curr, batch_idx))\n break\n\n del test_fold_loader\n # combine data from the batches and save them to numpy files\n data_adver, labels_adver, data_clean, labels_clean, norm_perturb, is_correct, is_adver = \\\n combine_and_save(adv_save_path, data_adver, labels_adver, data_clean, labels_clean, norm_perturb,\n is_correct, is_adver, labels_te)\n\n # Calculate accuracy of the DNN and the detection methods on adversarial inputs\n data_loader = convert_to_loader(data_adver, labels_clean, batch_size=args.batch_size)\n layer_embeddings, _, labels_pred_dnn, _ = extract_layer_embeddings_numpy(\n model, device, data_loader, method='proposed'\n )\n del data_loader\n accu_dnn, accu_propo, accu_dknn = helper_accuracy(\n layer_embeddings, labels_pred_dnn, labels_clean, i - 1\n )\n n_adver = is_adver[is_adver].shape[0]\n print(\"\\nTest fold {:d}: #samples = {:d}, #adversarial samples = {:d}, avg. perturbation norm = {:.6f}\".\n format(i, n_test, n_adver, np.mean(norm_perturb[is_adver])))\n print(\"Accuracy on clean and adversarial data from test fold {:d}:\".format(i))\n print(\"method\\t{}\\t{}\".format('accu. clean', 'accu. 
adver'))\n print(\"{}\\t{:.4f}\\t{:.4f}\".format('DNN', accu_clean_dnn, accu_dnn))\n print(\"{}\\t{:.4f}\\t{:.4f}\".format('proposed', accu_clean_propo, accu_propo))\n print(\"{}\\t{:.4f}\\t{:.4f}\".format('dknn', accu_clean_dknn, accu_dknn))\n t_del = (time.time() - t_init) / 3600.\n print(\"\\nTime taken for fold {:d}: {:.2f} hours\".format(i, t_del))\n else:\n print(\"generated original data split for fold : \", i)\n\n i = i + 1\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.unique", "torch.utils.data.DataLoader", "torch.from_numpy", "sklearn.model_selection.StratifiedKFold", "numpy.save", "numpy.concatenate", "numpy.ceil", "numpy.mean", "torch.cuda.is_available", "torch.device", "numpy.load", "sklearn.model_selection.StratifiedShuffleSplit", "numpy.where" ] ]
sadimanna/project_euler
[ "fe7907917bb59d186f84f0942629d108fa2a13da" ]
[ "p007.py" ]
[ "import numpy as np\r\nimport time\r\n\r\nif __name__ == '__main__':\r\n\tprimelist = np.array([2])\r\n\tpcount = 1\r\n\tpnum = 1\r\n\tstime = time.time()\r\n\twhile pcount!=10001:\r\n\t\tprimefound = 0\r\n\t\tpnum+=2\r\n\t\tfor i in range(pcount):\r\n\t\t\tif pnum%primelist[i]==0:\r\n\t\t\t\tbreak\r\n\t\t\telif primelist[i]>=np.sqrt(pnum):\r\n\t\t\t\tprimelist = np.append(primelist,pnum)\r\n\t\t\t\tprimefound = 1\r\n\t\t\t\tbreak\r\n\t\tif primefound == 1:\r\n\t\t\tpcount+=1\r\n\tprint(\"10001st prime number is %s\"%(pnum))\r\n\tprint(\"Time taken :: %.3f seconds\"%(time.time()-stime))" ]
[ [ "numpy.append", "numpy.array", "numpy.sqrt" ] ]