column           type            range
query            stringlengths   9 to 9.05k
document         stringlengths   10 to 222k
metadata         dict
negatives        listlengths     30 to 30
negative_scores  listlengths     30 to 30
document_score   stringlengths   4 to 10
document_rank    stringclasses   2 values
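Each row of the dump below pairs a natural-language query with the code document it describes, thirty hard negatives, per-negative retrieval scores, and a score and rank for the positive document. The sketch that follows shows one way such a row could be consumed; the load_dataset call and the dataset identifier are placeholder assumptions for illustration, and only the field names are taken from the schema above.

from datasets import load_dataset  # assumes the rows are published as a Hugging Face dataset

# Placeholder dataset name; substitute the real identifier for this dump.
ds = load_dataset("org/code-retrieval-triplets", split="train")

record = ds[0]
query = record["query"]                           # natural-language description
positive = record["document"]                     # code snippet matching the query
negatives = record["negatives"]                   # 30 non-matching snippets
negative_scores = [float(s) for s in record["negative_scores"]]
document_score = float(record["document_score"])  # scores are stored as strings
document_rank = int(record["document_rank"])      # "0" or "1" in the rows shown here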
Runs compute_c_max with isotope H and checks that the correct value is produced
def test_compute_c_max_h():
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    c_max = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="H")

    # test
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W*np.exp(-E_D_W/k_B/T)

    # implantation ranges
    implantation_range_ions = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    implantation_range_atoms = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    # reflection coefficients
    reflection_coeff_ions = [
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    reflection_coeff_atoms = [
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    reflection_coeff_ions = np.array(reflection_coeff_ions)
    reflection_coeff_atoms = np.array(reflection_coeff_atoms)

    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux*implantation_range_ions/D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux*implantation_range_atoms/D
    c_max_expected = c_max_ions + c_max_atoms

    assert c_max.all() == c_max_expected.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=True)\n\n # test\n assert len(output) == 3\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False)\n\n # test\n assert len(output) == 2", "def test_compute_c_max_D():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"D\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n D *= 1/2**0.5\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()", "def test_compute_c_max_D():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"T\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n D *= 1/3**0.5\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n 
c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()\n assert c_max.all() == c_max_expected.all()", "def test_cmax(self):\n cbca_obj = aggregation.AbstractAggregation(**{'aggregation_method': 'cbca',\n 'cbca_intensity': 5., 'cbca_distance': 3})\n\n cv_aggreg = cbca_obj.cost_volume_aggregation(self.ref, self.sec, self.cv)\n\n # Check if the calculated maximal cost is equal to the ground truth\n assert (np.nanmax(cv_aggreg['cost_volume'].data) <= (24 * 18))", "def compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"H\"):\n # Diffusion coefficient Fernandez et al Acta Materialia (2015)\n # https://doi.org/10.1016/j.actamat.2015.04.052\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n if isotope == \"D\":\n D *= 1/2**0.5\n elif isotope == \"T\":\n D *= 1/3**0.5\n # implantation ranges\n implantation_range_ions = [\n float(implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n implantation_range_ions = np.array(implantation_range_ions)\n implantation_range_atoms = np.array(implantation_range_atoms)\n\n # compute c_max\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max = c_max_ions + c_max_atoms\n\n if full_export:\n return c_max, c_max_ions, c_max_atoms\n else:\n return c_max", "def test_maxcut_output(self, graph, cost_hamiltonian, mixer_hamiltonian):\n\n cost_h, mixer_h = qaoa.maxcut(graph)\n\n assert decompose_hamiltonian(cost_hamiltonian) == decompose_hamiltonian(cost_h)\n assert decompose_hamiltonian(mixer_hamiltonian) == decompose_hamiltonian(mixer_h)", "def cmax(self):\n return self[\"cmax\"]", "def cmax(self):\n return self['cmax']", "def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):\n log.info('Calculating the temporal variance-covariance matrix')\n process_indices = mpiops.array_split(range(len(ifg_paths)))\n\n def _get_r_dist(ifg_path):\n \"\"\"\n Get RDIst class object\n \"\"\"\n ifg = Ifg(ifg_path)\n ifg.open()\n r_dist = vcm_module.RDist(ifg)()\n ifg.close()\n return r_dist\n\n r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])\n prcs_ifgs = mpiops.array_split(ifg_paths)\n process_maxvar = []\n for n, i in enumerate(prcs_ifgs):\n log.debug('Calculating maxvar for {} of process ifgs {} of total {}'.format(n+1, len(prcs_ifgs), len(ifg_paths)))\n process_maxvar.append(vcm_module.cvd(i, params, r_dist, calc_alpha=True, write_vals=True, save_acg=True)[0])\n if mpiops.rank == MASTER_PROCESS:\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n maxvar[process_indices] = process_maxvar\n for i in range(1, mpiops.size): # pragma: no cover\n rank_indices = mpiops.array_split(range(len(ifg_paths)), i)\n this_process_ref_phs = np.empty(len(rank_indices), dtype=np.float64)\n mpiops.comm.Recv(this_process_ref_phs, source=i, tag=i)\n maxvar[rank_indices] = this_process_ref_phs\n else: # pragma: no 
cover\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n mpiops.comm.Send(np.array(process_maxvar, dtype=np.float64), dest=MASTER_PROCESS, tag=mpiops.rank)\n\n mpiops.comm.barrier()\n maxvar = mpiops.comm.bcast(maxvar, root=0)\n vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar)\n log.debug(\"Finished maxvar and vcm calc!\")\n return maxvar, vcmt", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val", "def calcBlockMaxes(self):\n # restrict to fuel\n for k in self.p.paramDefs.inCategory(\"block-max\").names:\n try:\n maxVal = self.getMaxBlockParam(k.replace(\"max\", \"\"), Flags.FUEL)\n if maxVal != 0.0:\n self.p[k] = maxVal\n except KeyError:\n continue\n\n # add maxes based on pin-level max if it exists, block level max otherwise.\n # may want to use percentBuMax for pin-detailed cases.\n self.p.maxBuF = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(Flags.FEED | Flags.FUEL)\n ),\n default=0.0,\n )\n self.p.maxBuI = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(\n [\n Flags.IGNITER | Flags.FUEL,\n Flags.DRIVER | Flags.FUEL,\n Flags.STARTER | Flags.FUEL,\n ]\n )\n ),\n default=0.0,\n )", "def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError", "def hmax(f, h=1, Bc=None):\n\n if Bc is None: Bc = secross()\n g = subm(f,h)\n y = infrec(g,f,Bc);\n return y", "def density_maxima(self, samplesize=5, thresh_mod=0):\r\n filtered = ndimage.maximum_filter(self.data, size=(samplesize, samplesize, samplesize), mode=\"wrap\")\r\n\r\n threshold = filtered.mean() + thresh_mod\r\n print(f\"actual threhold value: {threshold:.2f}\")\r\n labels, num_labels = ndimage.label(filtered > threshold)\r\n\r\n # Coordinates of maxima\r\n pos = np.array(ndimage.measurements.center_of_mass(np.asarray(self.data), labels=labels,\r\n index=np.arange(1, num_labels + 1)))\r\n\r\n # Values of maxima\r\n val = np.array(ndimage.measurements.maximum(self.data, labels=labels, index=np.arange(1, num_labels + 1)))\r\n\r\n pos[:, 0] *= iCube.x[0]\r\n pos[:, 1] *= iCube.y[1]\r\n pos[:, 2] *= iCube.z[2]\r\n\r\n return pos, val", "def get_max(criterion, max_card, elements):\n if max_card <= 0:\n raise ValueError(\n \"max_card: \" + str(max_card) + \"\\n\" +\n \"The maximum cardinal cannot be null nor negative!\"\n )\n\n maxima = []\n currentMax = 0\n for e in elements:\n if 0 < e.cardinal <= max_card:\n if len(maxima) == 0:\n currentMax = criterion(e)\n maxima.append((e, currentMax))\n else:\n newCandidate = criterion(e)\n if newCandidate == currentMax:\n maxima.append((e, currentMax))\n elif newCandidate > currentMax:\n maxima = []\n currentMax = newCandidate\n maxima.append((e, currentMax))\n return maxima", "def test_max_clique_output(self, graph, constrained, cost_hamiltonian, mixer_hamiltonian):\n\n cost_h, mixer_h = qaoa.max_clique(graph, constrained=constrained)\n\n assert decompose_hamiltonian(cost_hamiltonian) == decompose_hamiltonian(cost_h)\n assert decompose_hamiltonian(mixer_hamiltonian) == decompose_hamiltonian(mixer_h)", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def grid_channelwise_max(grid_):\n return np.max(np.max(np.max(grid_, axis=0), axis=0), axis=0)", "def standardComposition_Max(self):\n temp = 
np.fmax(self.rulesList[0], self.rulesList[1])\n for r in self.rulesList[2:]:\n temp = np.fmax(temp, r)\n\n self.fuzzy_output = temp", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def get_max(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))", "def get_max(bij, exploration, bij_bool):\n\n#\tbij[bij_bool] = -sys.maxint - 1\n\n\tm = bij.argmax()\n\tc = np.unravel_index(m, bij.shape)\n\t#c = np.unravel_index(bij.argmax(), bij.shape)\n\n############################## A MODIFIER EVENTUELLEMENT #################\n#\tb = bij[bij_bool]\n#\tm = b.argmax()\n#\tind = np.unravel_index(m, b.shape)\n#\tc = np.where(bij == b[ind])\n#\tc = (c[0][0], c[1][0])\n#\tprint('mMAXx', bij[c])\n\treturn (c)", "def v_cmax(self, tl, ared):\n\t return ared*self.VCMAX0*exp(self.HAV/(R*self.TO)*(1. - self.TO/tl))/(1. + exp((self.SVC*tl - self.HDV)/(R*tl)))", "def findmax(h5file, pcoord_dim, fi, li):\n max_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n maxv = numpy.max(pc[:,-1,pcoord_dim-1])\n max_values.append(maxv)\n maxmax = numpy.max(max_values)\n nw = numpy.where(max_values>(maxmax-maxmax*0.0001))\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n max_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmax = numpy.max(max_iter)\n nw2 = numpy.where(max_iter>(segmax-segmax*0.0001))\n seg_num = (nw2[0])[0]\n print (\"Maximum pcoord value for dimension\",pcoord_dim,\"is:\",segmax) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r", "def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax", "def compute_optimal_block_maximum(block_counts) -> int:\n q1, q3 = compute_quartiles(block_counts)\n iqr = q3 - q1\n high_threshold = q3 + 1.5 * iqr\n return high_threshold", "def max3(stdin):\n # return max(map(float, stdin.split()))\n return float(run(\"./max3\", [], stdin)[1])", "def max(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no 
maximum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = -np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = max(m, np.max(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m" ]
[ "0.7804228", "0.7230941", "0.71904975", "0.6807927", "0.64462936", "0.6286616", "0.6217312", "0.6186257", "0.6168397", "0.6108435", "0.61060095", "0.6064953", "0.6030965", "0.5990423", "0.59639454", "0.5931527", "0.59088767", "0.5885161", "0.58844084", "0.5855641", "0.5847799", "0.58460695", "0.5785212", "0.5771255", "0.5760888", "0.5759304", "0.57516015", "0.5746994", "0.57459337", "0.5712045" ]
0.7624501
1
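The metadata field of each record declares a single triplet objective over ["query", "document", "negatives"]. Below is a minimal sketch of that reading, using the record dict from the loading example above; any downstream loss or model is deliberately out of scope and not part of the dump.

def build_triplets(record):
    # One (anchor, positive, negative) triplet per hard negative,
    # following the ["query", "document", "negatives"] objective in metadata.
    anchor = record["query"]
    positive = record["document"]
    return [(anchor, positive, negative) for negative in record["negatives"]]

# With 30 negatives per record this yields 30 triplets:
# assert len(build_triplets(record)) == 30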
Runs compute_c_max with isotope D and checks that the correct value is produced
def test_compute_c_max_D():
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    c_max = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False, isotope="D")

    # test
    D_0_W = 1.9e-7
    E_D_W = 0.2
    k_B = 8.617e-5
    D = D_0_W*np.exp(-E_D_W/k_B/T)
    D *= 1/2**0.5

    # implantation ranges
    implantation_range_ions = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    implantation_range_atoms = [
        float(divHretention.implantation_range(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    # reflection coefficients
    reflection_coeff_ions = [
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_ion, angles_ion)]
    reflection_coeff_atoms = [
        float(divHretention.reflection_coeff(energy, angle))
        for energy, angle in zip(E_atom, angles_atom)]

    reflection_coeff_ions = np.array(reflection_coeff_ions)
    reflection_coeff_atoms = np.array(reflection_coeff_atoms)

    c_max_ions = (1 - reflection_coeff_ions) * \
        ion_flux*implantation_range_ions/D
    c_max_atoms = (1 - reflection_coeff_atoms) * \
        atom_flux*implantation_range_atoms/D
    c_max_expected = c_max_ions + c_max_atoms

    assert c_max.all() == c_max_expected.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=True)\n\n # test\n assert len(output) == 3\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False)\n\n # test\n assert len(output) == 2", "def test_compute_c_max_D():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"T\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n D *= 1/3**0.5\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()\n assert c_max.all() == c_max_expected.all()", "def test_compute_c_max_h():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"H\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n 
atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()", "def test_cmax(self):\n cbca_obj = aggregation.AbstractAggregation(**{'aggregation_method': 'cbca',\n 'cbca_intensity': 5., 'cbca_distance': 3})\n\n cv_aggreg = cbca_obj.cost_volume_aggregation(self.ref, self.sec, self.cv)\n\n # Check if the calculated maximal cost is equal to the ground truth\n assert (np.nanmax(cv_aggreg['cost_volume'].data) <= (24 * 18))", "def compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"H\"):\n # Diffusion coefficient Fernandez et al Acta Materialia (2015)\n # https://doi.org/10.1016/j.actamat.2015.04.052\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n if isotope == \"D\":\n D *= 1/2**0.5\n elif isotope == \"T\":\n D *= 1/3**0.5\n # implantation ranges\n implantation_range_ions = [\n float(implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n implantation_range_ions = np.array(implantation_range_ions)\n implantation_range_atoms = np.array(implantation_range_atoms)\n\n # compute c_max\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max = c_max_ions + c_max_atoms\n\n if full_export:\n return c_max, c_max_ions, c_max_atoms\n else:\n return c_max", "def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val", "def cmax(self):\n return self[\"cmax\"]", "def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):\n log.info('Calculating the temporal variance-covariance matrix')\n process_indices = mpiops.array_split(range(len(ifg_paths)))\n\n def _get_r_dist(ifg_path):\n \"\"\"\n Get RDIst class object\n \"\"\"\n ifg = Ifg(ifg_path)\n ifg.open()\n r_dist = vcm_module.RDist(ifg)()\n ifg.close()\n return r_dist\n\n r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])\n prcs_ifgs = mpiops.array_split(ifg_paths)\n process_maxvar = []\n for n, i in enumerate(prcs_ifgs):\n log.debug('Calculating maxvar for {} of process ifgs {} of total {}'.format(n+1, len(prcs_ifgs), len(ifg_paths)))\n process_maxvar.append(vcm_module.cvd(i, params, r_dist, calc_alpha=True, write_vals=True, save_acg=True)[0])\n if mpiops.rank == MASTER_PROCESS:\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n maxvar[process_indices] = process_maxvar\n for i in range(1, mpiops.size): # pragma: no cover\n rank_indices = mpiops.array_split(range(len(ifg_paths)), i)\n this_process_ref_phs = np.empty(len(rank_indices), dtype=np.float64)\n mpiops.comm.Recv(this_process_ref_phs, source=i, tag=i)\n maxvar[rank_indices] = this_process_ref_phs\n else: # pragma: no cover\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n mpiops.comm.Send(np.array(process_maxvar, 
dtype=np.float64), dest=MASTER_PROCESS, tag=mpiops.rank)\n\n mpiops.comm.barrier()\n maxvar = mpiops.comm.bcast(maxvar, root=0)\n vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar)\n log.debug(\"Finished maxvar and vcm calc!\")\n return maxvar, vcmt", "def cmax(self):\n return self['cmax']", "def density_maxima(self, samplesize=5, thresh_mod=0):\r\n filtered = ndimage.maximum_filter(self.data, size=(samplesize, samplesize, samplesize), mode=\"wrap\")\r\n\r\n threshold = filtered.mean() + thresh_mod\r\n print(f\"actual threhold value: {threshold:.2f}\")\r\n labels, num_labels = ndimage.label(filtered > threshold)\r\n\r\n # Coordinates of maxima\r\n pos = np.array(ndimage.measurements.center_of_mass(np.asarray(self.data), labels=labels,\r\n index=np.arange(1, num_labels + 1)))\r\n\r\n # Values of maxima\r\n val = np.array(ndimage.measurements.maximum(self.data, labels=labels, index=np.arange(1, num_labels + 1)))\r\n\r\n pos[:, 0] *= iCube.x[0]\r\n pos[:, 1] *= iCube.y[1]\r\n pos[:, 2] *= iCube.z[2]\r\n\r\n return pos, val", "def __calc_mmd_maxconc(self,event):\n \n # Use smoothed data\n if self.particle_mode:\n data = np.log10(gaussian_filter(self.par_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.par_diam,self.par_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n if self.ion_mode:\n data = np.log10(gaussian_filter(self.ion1_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.ion1_diam,self.ion1_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n\n # Transform polygon perimeter to path\n try:\n banana_perimeter = Path(np.array(list(zip(self.polyx,self.polyy))))\n except ValueError:\n print (\"No polygon found\")\n return\n\n # Eliminate nans and infs from dndlogdp\n points = np.delete(points,np.argwhere((np.isnan(points[:,2]))|(np.isinf(points[:,2]))),axis=0)\n banana_points = points[banana_perimeter.contains_points(points[:,[0,1]]),:]\n\n if len(banana_points)==0:\n print (\"Found no points inside polygon.\")\n return\n \n # Grouping the size distribution data points\n if self.particle_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.par_diam if x in banana_points[:,1]]\n if self.ion_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.ion1_diam if x in banana_points[:,1]]\n \n sorted_banana_points = [x[x[:,0].argsort()] for x in pre_sorted_banana_points]\n \n for i in range(0,len(sorted_banana_points)):\n x = sorted_banana_points[i][:,0] - self.mintime\n y = sorted_banana_points[i][:,2]\n a=np.max(y)\n mu=np.mean(x)\n sigma=np.std(x)\n try:\n params,pcov = curve_fit(self.__gaus,x,y,p0=[a,mu,sigma])\n if ((params[1]>=x.max()) | (params[1]<=x.min())):\n print (\"Peak outside range. Skipping %f\" % (sorted_banana_points[i][0,1]))\n else:\n self.mmd_dp = np.append(self.mmd_dp,sorted_banana_points[i][0,1])\n self.mmd_time = np.append(self.mmd_time,params[1] + self.mintime)\n except:\n print (\"Diverges. 
Skipping %f\" % (sorted_banana_points[i][0,1]))\n\n # Plot the result on ax\n self.mmd_plot.set_data(self.mmd_time,self.mmd_dp)\n plt.draw()", "def get_max(criterion, max_card, elements):\n if max_card <= 0:\n raise ValueError(\n \"max_card: \" + str(max_card) + \"\\n\" +\n \"The maximum cardinal cannot be null nor negative!\"\n )\n\n maxima = []\n currentMax = 0\n for e in elements:\n if 0 < e.cardinal <= max_card:\n if len(maxima) == 0:\n currentMax = criterion(e)\n maxima.append((e, currentMax))\n else:\n newCandidate = criterion(e)\n if newCandidate == currentMax:\n maxima.append((e, currentMax))\n elif newCandidate > currentMax:\n maxima = []\n currentMax = newCandidate\n maxima.append((e, currentMax))\n return maxima", "def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax", "def get_max(bij, exploration, bij_bool):\n\n#\tbij[bij_bool] = -sys.maxint - 1\n\n\tm = bij.argmax()\n\tc = np.unravel_index(m, bij.shape)\n\t#c = np.unravel_index(bij.argmax(), bij.shape)\n\n############################## A MODIFIER EVENTUELLEMENT #################\n#\tb = bij[bij_bool]\n#\tm = b.argmax()\n#\tind = np.unravel_index(m, b.shape)\n#\tc = np.where(bij == b[ind])\n#\tc = (c[0][0], c[1][0])\n#\tprint('mMAXx', bij[c])\n\treturn (c)", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def v_cmax(self, tl, ared):\n\t return ared*self.VCMAX0*exp(self.HAV/(R*self.TO)*(1. - self.TO/tl))/(1. 
+ exp((self.SVC*tl - self.HDV)/(R*tl)))", "def calcBlockMaxes(self):\n # restrict to fuel\n for k in self.p.paramDefs.inCategory(\"block-max\").names:\n try:\n maxVal = self.getMaxBlockParam(k.replace(\"max\", \"\"), Flags.FUEL)\n if maxVal != 0.0:\n self.p[k] = maxVal\n except KeyError:\n continue\n\n # add maxes based on pin-level max if it exists, block level max otherwise.\n # may want to use percentBuMax for pin-detailed cases.\n self.p.maxBuF = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(Flags.FEED | Flags.FUEL)\n ),\n default=0.0,\n )\n self.p.maxBuI = max(\n (\n a.getMaxParam(\"percentBu\")\n for a in self.getAssemblies(\n [\n Flags.IGNITER | Flags.FUEL,\n Flags.DRIVER | Flags.FUEL,\n Flags.STARTER | Flags.FUEL,\n ]\n )\n ),\n default=0.0,\n )", "def standardComposition_Max(self):\n temp = np.fmax(self.rulesList[0], self.rulesList[1])\n for r in self.rulesList[2:]:\n temp = np.fmax(temp, r)\n\n self.fuzzy_output = temp", "def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return", "def Max(data):\n return data.max()", "def test_realistic_max_dose(self):\n\n # min and max dose can only really hope to be within half a bin width\n\n for struct, data in self.test_structs.items():\n dvh = DVH(data[\"doses\"], data[\"volumes\"])\n diff = dvh.max_dose - data[\"monaco_dvh_max_dose\"]\n self.assertLessEqual(abs(diff), 5.)", "def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError", "def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs", "def findmax(h5file, pcoord_dim, fi, li):\n max_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n maxv = numpy.max(pc[:,-1,pcoord_dim-1])\n max_values.append(maxv)\n maxmax = numpy.max(max_values)\n nw = numpy.where(max_values>(maxmax-maxmax*0.0001))\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n max_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmax = numpy.max(max_iter)\n nw2 = numpy.where(max_iter>(segmax-segmax*0.0001))\n seg_num = (nw2[0])[0]\n print (\"Maximum pcoord value for dimension\",pcoord_dim,\"is:\",segmax) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def get_maximum_value(dataset):\n d = [int(i) for i in dataset if i.isdigit()]\n op = [o for o in dataset if o in ['*', '-', '+']]\n n = len(d)\n d.insert(0, None)\n op.insert(0, None)\n m = [[0 for x in range(n+1)] for y in range(n+1)]\n M = [[0 for x in range(n+1)] for y in range(n+1)]\n for i in range(1, n+1):\n m[i][i] = d[i]\n M[i][i] = d[i]\n for s in range(1, n):\n for i in range(1, n-s+1):\n j = i + s\n m[i][j], M[i][j] = min_and_max(i, j, op, m, M)\n return M[1][n]", "def 
calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)", "def find_max(subimage):\r\n\tmax_val_subimage = np.nanmax(subimage)\r\n\treturn max_val_subimage" ]
[ "0.7668764", "0.7432257", "0.70756537", "0.6916304", "0.66039973", "0.633388", "0.6233156", "0.6231558", "0.62197894", "0.6086765", "0.6010786", "0.59821117", "0.595326", "0.5884363", "0.58655345", "0.5859344", "0.5851879", "0.5801701", "0.5789719", "0.5774218", "0.5764772", "0.57606435", "0.5759333", "0.5694705", "0.56853366", "0.56711155", "0.5659055", "0.56563234", "0.5639816", "0.5608533" ]
0.7500105
1
Runs compute_c_max and checks that the correct output is produced
def test_compute_c_max_output():
    # build
    T = np.array([600, 500])
    E_ion = np.array([20, 10])
    E_atom = np.array([30, 40])
    angles_ion = np.array([60, 60])
    angles_atom = np.array([60, 60])
    ion_flux = np.array([1e21, 1e20])
    atom_flux = np.array([2e21, 2e20])

    # run
    output = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=True)

    # test
    assert len(output) == 3

    # run
    output = divHretention.compute_c_max(
        T, E_ion, E_atom, angles_ion, angles_atom,
        ion_flux, atom_flux, full_export=False)

    # test
    assert len(output) == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cmax(self):\n cbca_obj = aggregation.AbstractAggregation(**{'aggregation_method': 'cbca',\n 'cbca_intensity': 5., 'cbca_distance': 3})\n\n cv_aggreg = cbca_obj.cost_volume_aggregation(self.ref, self.sec, self.cv)\n\n # Check if the calculated maximal cost is equal to the ground truth\n assert (np.nanmax(cv_aggreg['cost_volume'].data) <= (24 * 18))", "def test_compute_c_max_D():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"T\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n D *= 1/3**0.5\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()\n assert c_max.all() == c_max_expected.all()", "def test_compute_c_max_D():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"D\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n D *= 1/2**0.5\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()", "def test_compute_c_max_h():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n 
angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n c_max = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False, isotope=\"H\")\n\n # test\n D_0_W = 1.9e-7\n E_D_W = 0.2\n k_B = 8.617e-5\n D = D_0_W*np.exp(-E_D_W/k_B/T)\n\n # implantation ranges\n implantation_range_ions = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n implantation_range_atoms = [\n float(divHretention.implantation_range(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n # reflection coefficients\n reflection_coeff_ions = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_ion, angles_ion)]\n reflection_coeff_atoms = [\n float(divHretention.reflection_coeff(energy, angle))\n for energy, angle in zip(E_atom, angles_atom)]\n\n reflection_coeff_ions = np.array(reflection_coeff_ions)\n reflection_coeff_atoms = np.array(reflection_coeff_atoms)\n\n c_max_ions = (1 - reflection_coeff_ions) * \\\n ion_flux*implantation_range_ions/D\n c_max_atoms = (1 - reflection_coeff_atoms) * \\\n atom_flux*implantation_range_atoms/D\n c_max_expected = c_max_ions + c_max_atoms\n\n assert c_max.all() == c_max_expected.all()", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r", "def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val", "def cmax(self):\n return self['cmax']", "def local_max_and_argmax(node):\r\n if node.op == T._max_and_argmax:\r\n if len(node.outputs[1].clients) == 0:\r\n #MaxAndArgmax support variable axis,\r\n #but CAReduce support only constant axis.\r\n if node.inputs[1].data is None:\r\n axis = None\r\n else:\r\n try:\r\n axis = get_scalar_constant_value(node.inputs[1])\r\n except NotScalarConstantError:\r\n return False\r\n\r\n new = CAReduce(scal.maximum, axis)(node.inputs[0])\r\n return [new, None]", "def cmax(self):\n return self[\"cmax\"]", "def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):\n log.info('Calculating the temporal variance-covariance matrix')\n process_indices = mpiops.array_split(range(len(ifg_paths)))\n\n def _get_r_dist(ifg_path):\n \"\"\"\n Get RDIst class object\n \"\"\"\n ifg = Ifg(ifg_path)\n ifg.open()\n r_dist = vcm_module.RDist(ifg)()\n ifg.close()\n return r_dist\n\n r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])\n prcs_ifgs = mpiops.array_split(ifg_paths)\n process_maxvar = []\n for n, i in enumerate(prcs_ifgs):\n log.debug('Calculating maxvar for {} of process ifgs {} of total {}'.format(n+1, len(prcs_ifgs), len(ifg_paths)))\n process_maxvar.append(vcm_module.cvd(i, params, r_dist, calc_alpha=True, write_vals=True, save_acg=True)[0])\n if mpiops.rank == 
MASTER_PROCESS:\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n maxvar[process_indices] = process_maxvar\n for i in range(1, mpiops.size): # pragma: no cover\n rank_indices = mpiops.array_split(range(len(ifg_paths)), i)\n this_process_ref_phs = np.empty(len(rank_indices), dtype=np.float64)\n mpiops.comm.Recv(this_process_ref_phs, source=i, tag=i)\n maxvar[rank_indices] = this_process_ref_phs\n else: # pragma: no cover\n maxvar = np.empty(len(ifg_paths), dtype=np.float64)\n mpiops.comm.Send(np.array(process_maxvar, dtype=np.float64), dest=MASTER_PROCESS, tag=mpiops.rank)\n\n mpiops.comm.barrier()\n maxvar = mpiops.comm.bcast(maxvar, root=0)\n vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar)\n log.debug(\"Finished maxvar and vcm calc!\")\n return maxvar, vcmt", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def v_cmax(self, tl, ared):\n\t return ared*self.VCMAX0*exp(self.HAV/(R*self.TO)*(1. - self.TO/tl))/(1. + exp((self.SVC*tl - self.HDV)/(R*tl)))", "def test_eval_one_max(self):\n f0 = np.ones((10, 5))\n self.assertTrue(np.isinf(eval_one_max(f0)[0]))", "def test_eval_3(self):\n maxcycles = collatz_eval(201, 210)\n self.assertEqual(maxcycles, 89)", "def test_eval_2(self):\n maxcycles = collatz_eval(100, 200)\n self.assertEqual(maxcycles, 125)", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.argmax(input_vals[0], node.const_attr)\r\n else:\r\n return np.argmax(input_vals[0])", "def calc_confidence(raw_output, labels_list=None):\n #print(\"raw_output:\", raw_output)\n result = raw_output\n probs = []\n for dc in result:\n for key in dc:\n #print(\"dc:\", dc)\n #print(\"key:\", key)\n logits = dc[key]\n logit = logits[0]\n ps = softmax_probabilities(logit)\n max_ps = max(ps)\n index = np.argmax(ps)\n probs.append(max_ps)\n #print('max_ps:', max_ps)\n #print('index:', index)\n #if labels_list:\n # print('tag:', labels_list[index])\n return probs", "def test_eval_6(self):\n maxcycles = collatz_eval(1000, 2001)\n self.assertEqual(maxcycles, 182)", "def max3(stdin):\n # return max(map(float, stdin.split()))\n return float(run(\"./max3\", [], stdin)[1])", "def calculate_ucb_max(self, node):\n pass", "def calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)", "def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)", "def test_eval_4(self):\n maxcycles = collatz_eval(900, 1000)\n self.assertEqual(maxcycles, 174)", "def _call_max(vecObj):\n res = vecObj.max()\n return res", "def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v", "def computeGoodMax(totalTimes, noerrs):\n # Could allow a small amount of space above the top, but it's annnoying for percentages!\n # return None\n factor = 1.00\n maxReading = factor * max(\n [max([v for v in l if v != None]) for l in list(totalTimes.values())]\n )\n if maxReading == 0:\n maxReading = 0.1\n decade = math.floor(math.log10(maxReading))\n scaledValue = maxReading * 10 ** (-decade)\n # print (\"maxReading: \",maxReading,\"decade: \",decade,\" scaledValue: \",scaledValue)\n for v in (\n 1.0,\n 1.1,\n 1.2,\n 1.25,\n 1.3,\n 1.4,\n 1.5,\n 1.6,\n 1.7,\n 1.75,\n 1.8,\n 1.9,\n 2.0,\n 2.5,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 7.5,\n 8.0,\n 9.0,\n ):\n if scaledValue <= 
v:\n # print (\"computeGoodMax: \", v * (10**decade))\n return v * (10 ** decade)\n # print (\"computeGoodMax: \", 10**(decade+1))\n return 10 ** (decade + 1)", "def test_eval_5(self):\n maxcycles = collatz_eval(500, 1500)\n self.assertEqual(maxcycles, 182)", "def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError", "def test_eval_7(self):\n maxcycles = collatz_eval(1, 1)\n self.assertEqual(maxcycles, 1)", "def test_maxcut_output(self, graph, cost_hamiltonian, mixer_hamiltonian):\n\n cost_h, mixer_h = qaoa.maxcut(graph)\n\n assert decompose_hamiltonian(cost_hamiltonian) == decompose_hamiltonian(cost_h)\n assert decompose_hamiltonian(mixer_hamiltonian) == decompose_hamiltonian(mixer_h)" ]
[ "0.71956396", "0.6706909", "0.6666093", "0.6654545", "0.6497787", "0.64847076", "0.6446621", "0.64427555", "0.6440773", "0.63409734", "0.6231431", "0.6216798", "0.62037325", "0.6184099", "0.6173182", "0.6159964", "0.61472124", "0.6138057", "0.61080885", "0.6080764", "0.60594267", "0.6049107", "0.6040228", "0.60253334", "0.60180223", "0.6015219", "0.5996745", "0.59854984", "0.5964481", "0.59609896" ]
0.81507987
0
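Across the three records shown so far, document_rank is consistent with counting how many negatives outscore the positive document: both rank-1 records have exactly one negative whose score exceeds document_score, while this rank-0 record has none. That reading is an inference from these rows, not a documented guarantee. As a small hedged helper built only on the field names above, one could pull out the negatives that beat the positive like this:

def harder_negatives(record):
    # Negatives whose retrieval score beats the positive document's score.
    # Scores are stored as strings in this dump, hence the float() casts.
    doc_score = float(record["document_score"])
    pairs = zip(record["negatives"], record["negative_scores"])
    return [neg for neg, score in pairs if float(score) > doc_score]

# For the record above (document_score 0.81507987, top negative 0.71956396)
# this returns an empty list, matching its document_rank of 0.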
Return a dict for our Ansible facts.
def _facts(facts):
    return {'swift_facts': facts}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provides_facts():\n return {\n \"domain\": \"The domain name configured at the [edit system \"\n \"domain-name] configuration hierarchy.\",\n \"fqdn\": \"The device's hostname + domain\",\n }", "def facts(self): # pylint: disable=invalid-overridden-method\n return {}", "def get_facts_dict(junos_module):\n if junos_module.conn_type == \"local\":\n dev = junos_module.dev\n # Retrieve all PyEZ-supported facts and copy to a standard dict.\n facts = dict(dev.facts)\n # Add two useful facts that are implement as PyEZ Device attributes.\n facts['re_name'] = dev.re_name\n facts['master_state'] = dev.master\n else:\n facts = junos_module.get_facts()\n # Ansible doesn't allow keys starting with numbers.\n # Replace the '2RE' key with the 'has_2RE' key.\n if '2RE' in facts:\n facts['has_2RE'] = facts['2RE']\n del facts['2RE']\n # The value of the 'version_info' key is a custom junos.version_info\n # object. Convert this value to a dict.\n if 'version_info' in facts and facts['version_info'] is not None:\n facts['version_info'] = dict(facts['version_info'])\n # The values of the ['junos_info'][re_name]['object'] keys are\n # custom junos.version_info objects. Convert all of these to dicts.\n if 'junos_info' in facts and facts['junos_info'] is not None:\n for key in facts['junos_info']:\n facts['junos_info'][key]['object'] = dict(\n facts['junos_info'][key]['object'])\n return facts", "def get_facts(module):\n bmc = build_client(module)\n bios_settings = get_bios_settings(bmc)\n nic_settings = get_nic_settings(bmc)\n controllers, pdisks, vdisks = get_raid_config(bmc)\n jobs = get_jobs(bmc, False)\n unfinished_jobs = get_jobs(bmc, True)\n return {\n \"drac_bios_settings\": bios_settings,\n \"drac_nic_settings\": nic_settings,\n \"drac_jobs\": jobs,\n \"drac_unfinished_jobs\": unfinished_jobs,\n \"drac_raid_controllers\": controllers,\n \"drac_physical_disks\": pdisks,\n \"drac_virtual_disks\": vdisks,\n }", "def gather_facts(self):\n try:\n status = subprocess.check_output([\"mysql\", \"-e\", \"show status\"],\n stderr=subprocess.STDOUT)\n global_vars = subprocess.check_output([\"mysql\", \"-e\",\n \"show global variables\"],\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n message = 'Mysql fact collection failed: \"%s\".' 
% e.output.strip()\n self.module.fail_json(msg=message)\n else:\n lines = status.split('\\n')\n lines += global_vars.split('\\n')\n facts = dict(l.split('\\t') for l in lines if l)\n self.module.exit_json(\n changed=self.state_change,\n ansible_facts={'mysql_status': facts})", "def facts(env):\n envs = environments()\n check_env(env, envs)\n facts = []\n order_by = '[{\"field\": \"name\", \"order\": \"asc\"}]'\n\n if env == '*':\n facts = get_or_abort(puppetdb.fact_names)\n else:\n query = ExtractOperator()\n query.add_field(str('name'))\n query.add_query(EqualsOperator(\"environment\", env))\n query.add_group_by(str(\"name\"))\n\n for names in get_or_abort(puppetdb._query,\n 'facts',\n query=query,\n order_by=order_by):\n facts.append(names['name'])\n\n facts_dict = collections.defaultdict(list)\n for fact in facts:\n letter = fact[0].upper()\n letter_list = facts_dict[letter]\n letter_list.append(fact)\n facts_dict[letter] = letter_list\n\n sorted_facts_dict = sorted(facts_dict.items())\n return render_template('facts.html',\n facts_dict=sorted_facts_dict,\n facts_len=(sum(map(len, facts_dict.values())) +\n len(facts_dict) * 5),\n envs=envs,\n current_env=env)", "def getAnsibleInfo(host):\n #First do a ping to get more results\n data = runAnsibleCommand(host.getID(), 'ping')\n if data[0]['status'] == 'UNREACHABLE!':\n return None\n #Get the actual data\n return runAnsibleCommand(host.getID(), 'setup')[0]['json']", "def populate_facts(self, connection, ansible_facts, data=None):\n fos = self._fos if self._fos else connection\n vdom = self._module.params['vdom']\n ansible_facts['ansible_network_resources'].pop('system', None)\n facts = {}\n if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):\n resp = fos.monitor('system', self._uri[len('system_'):].replace('_', '/'), vdom=vdom)\n facts.update({self._uri: resp})\n ansible_facts['ansible_network_resources'].update(facts)\n return ansible_facts", "def get_facts(device):\n domain_config = \"\"\"\n <configuration>\n <system>\n <domain-name/>\n </system>\n </configuration>\n \"\"\"\n domain = None\n fqdn = None\n # Try to read the domain-name from the config.\n # This might fail due to lack of permissions.\n try:\n rsp = device.rpc.get_config(\n filter_xml=etree.XML(domain_config),\n options={\n \"database\": \"committed\",\n \"inherit\": \"inherit\",\n \"commit-scripts\": \"apply\",\n },\n )\n domain = rsp.findtext(\".//domain-name\")\n # Ignore if user can't view the configuration.\n except PermissionError:\n pass\n\n # Try to read the domain from the resolv.conf file. 
This only requires\n # view permissions.\n if domain is None:\n fs = FS(device)\n file_content = fs.cat(\"/etc/resolv.conf\") or fs.cat(\"/var/etc/resolv.conf\")\n words = file_content.split() if file_content is not None else []\n if \"domain\" in words:\n idx = words.index(\"domain\") + 1\n domain = words[idx]\n\n # Set the fqdn\n fqdn = device.facts[\"hostname\"]\n if fqdn is not None and domain is not None:\n fqdn = fqdn + \".\" + domain\n\n return {\n \"domain\": domain,\n \"fqdn\": fqdn,\n }", "def list_inventory(self):\n inventory = {}\n host_vars = {}\n\n for droplet in self.do.droplets:\n for rule in self.group_rules:\n rule.apply(droplet, inventory)\n\n host_vars[droplet[\"ip_address\"]] = {\n \"do_{}\".format(k): v for k, v in droplet.iteritems()\n }\n\n inventory[\"_meta\"] = {\n \"hostvars\": host_vars\n }\n\n return inventory", "def main():\n\n # the AnsibleModule object will be our abstraction for working with Ansible.\n # This includes instantiation, a couple of common attr that will be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=dict(\n hostvars=dict(type='raw', required=True),\n report_timestamp=dict(type=str, required=False, default=''),\n registered_dict_name=dict(type=str, required=False, default=\"get_sas_host_details_results\"),\n include_hotfix_report=dict(type=bool, required=False, default=True),\n hotfix_url = dict(type=str, required=True),\n hotfix_master_file = dict(type=str, required=True)\n ),\n supports_check_mode=True\n )\n\n # get module parameters\n hostvars = module.params['hostvars']\n report_timestamp = module.params['report_timestamp']\n registered_dict_name = module.params['registered_dict_name']\n include_hotfix_report = module.params['include_hotfix_report']\n hotfix_url = module.params['hotfix_url']\n hotfix_master_file = module.params['hotfix_master_file']\n\n # Starting in Ansible 2.8.1, there is the potential for hostvars\n # to be passed as a byte string, if the dict is too large\n # This will convert the str back to a dict before proceeding\n if isinstance(hostvars, str):\n hostvars = ast.literal_eval(hostvars.decode())\n\n results = dict()\n results['sas_hosts'] = dict()\n results['created'] = report_timestamp\n\n for inventory_hostname, host_vars in hostvars.items():\n\n # set up returnable values\n unreachable = True\n failed = True\n failure_details = dict(\n msg=\"\",\n rc=0,\n stderr=\"\",\n stdout=\"\",\n )\n\n # get the host details dict\n host_details = host_vars.get(registered_dict_name)\n\n # check if the host has the registered dict\n if host_details is not None:\n\n # host details exist, so host was reachable\n unreachable = False\n\n # check if the host failed\n failed = host_details['failed']\n\n # if the module reported a failure, collect details\n if failed:\n failure_details['msg'] = host_details['msg']\n failure_details['rc'] = host_details['rc']\n failure_details['stderr'] = host_details['module_stderr']\n failure_details['stdout'] = host_details['module_stdout']\n else:\n # get module results\n host_results = host_details.get('sas_host_details')\n\n if host_results is not None:\n results['sas_hosts'].update(host_results)\n else:\n failed = True\n\n # if the results dict could not be found, mark the host as unreachable\n if failed or unreachable:\n host_groups = host_vars.get('group_names')\n\n if host_groups is not None and 'sas_all' in host_groups:\n hostname = host_vars.get('ansible_fqdn')\n if hostname is None or hostname == 
\"\":\n hostname = host_vars.get('ansible_hostname')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_host')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('inventory_hostname')\n if hostname is None or hostname == \"\":\n hostname = inventory_hostname\n\n try:\n host_groups.remove('sas_all')\n host_groups.remove('sas-all')\n except ValueError:\n pass # do nothing\n\n results['sas_hosts'][hostname] = dict(\n _id=hostname.replace('.', '-'),\n _unreachable=unreachable,\n _failed=failed,\n _failure_details=failure_details,\n ansible_host_groups=host_groups\n )\n else:\n pass # this host isn't in sas_all so there's no need to try and report on it\n\n ##################################################################################\n # This section will find all of the hotfixes available and add them to the report.\n ##################################################################################\n\n # There are a few data structures that are complicated enough to warrant a description:\n # fullReport\n # This will hold all of the data in a format condusive to printing it out in the final report. This is how\n # It is structured:\n # fullReport (dict):\n # key=Hot Fix Name, point to another dict:\n # key=\"released\", points to a string containing the release date of the hotfix.\n # key= \"installed\", points to a boolean that will reflect whether any of the packages used by this hotfix are installed on any of the machines in the deployment.\n # key=\"upToDate\", point to a boolean that will reflest whether ALL of the packages used by this hotfix are up to date on ALL of the machines in the deployment.\n # key=\"sasnote\", points to another dict:\n # key=SASNote number, points to the description of the SASNote.\n # key=\"package\", points to another dict:\n # key=\"platform\" , points to another dict:\n # key=OS, points to another dict:\n # key=\"version\", points to the string of the version of the package.\n # key=\"installed\", points to a boolean which reflects whether this package is installed on any machine in the deployment.\n # key=\"upToDate\", points to a boolean which reflects whether this package is up to data on ALL of the machines in the deployment.\n # key=\"os\", points to the fully qualified name of the operating system.\n # key=\"arch\", points to the architecture of the OS (NOTE: This does not exist on Windows systems.)\n # key=\"alreadyUpdated\", points to a boolean, which is used to keep track of whether the upToDate has already been set.\n # key=\"installedVersions\", points to another dict:\n # key=machineName, points to a 2 element list:\n # [0]=string containing package version that is currently installed.\n # [1]=boolean reflecting whether this version is at or above the package delevered in this hotfix.\n #\n ###########################################################################\n #\n # packageToHotFix\n # This will hold a dict of lists:\n # key: package name, pointing to a 2 element list:\n # [0] OS\n # [1] The Hotfix that this package is associated with.\n #\n ###########################################################################\n #\n # environmentReportDict\n # This is inherited from the environment report, but it's probably worth documenting what it looks like.\n # There is a lot of data inerherited, and I'm only describing what is used in this script.\n # environmentReportDict\n # key=hostname (for each machine in the deployment), pointing to another dict:\n # key=\"OS\", pointing to string for 
the OS family.\n # key=\"arch\", pointing to the string for the architecture of the host.\n # key=\"sas_packages\", pointing to another dict:\n # key=package number, pointing to another dict:\n # key=\"attributes\", pointing to another dict:\n # key=\"version\", pointing to a string of the package versions currently installed on the host.\n ############################################################################\n\n results[\"include_hotfix_report\"] = include_hotfix_report\n if include_hotfix_report:\n # This is the URL from which to pull the hotfix files.\n if hotfix_url[-1:] == '/':\n baseURL = hotfix_url\n else:\n baseURL = hotfix_url + '/'\n # This is the master file that lists which other files should be examined for the actual hotfixes themselves.\n masterFile = hotfix_master_file\n # This is the top level object to store the hotfix report information (see above).\n fullReport = {}\n # This is a dict of package to hotfixes (see above).\n packageToHotfix = {}\n # This boolean will help with debugging.\n debug = False\n\n try:\n # Parse the master file to obtain where the hotfix files are.\n masterFileXML = urllib2.urlopen(baseURL + masterFile)\n\n # Parse the master file and build a list of all files.\n allFilesRoot = ET.fromstring(masterFileXML.read())\n results[\"contact_hotfix_website\"] = True\n except urllib2.URLError :\n results[\"contact_hotfix_website\"] = False\n results[\"master_website\"] = baseURL + masterFile\n if debug:\n print(\"***** Error parsing \" + baseURL + masterFile)\n print(traceback.format_exc())\n print(\"***** No hot fix information obtained. Skipping hot fix report.\\n\\n\")\n\n if results[\"contact_hotfix_website\"]:\n # Loop through the files discoverd in the master file\n if debug:\n print(\"Building hot fix report, based on master file input.\")\n for file_tag in allFilesRoot.findall('File'):\n currentFile = file_tag.get('fileName')\n fileToParse = baseURL + currentFile\n # Retrieve each file.\n # Inside of each file, the lines are keyed by the hot fix id. There are three types of lines, in order:\n # 1) id and release date\n # 2) id, sasnote, sasnotetitle\n # 3) id, OS, package.\n # This script loops through to build a dictionary of dictonaries with the basic structure:\n # ID\n # Release Date\n # SASNotes\n # SASNote and Title\n # ...\n # Packages\n # Package Name, Version, and OS\n try:\n currentFileXML = urllib2.urlopen(fileToParse)\n currentFileRoot = ET.fromstring(currentFileXML.read())\n updateID = \"\"\n for update_tag in currentFileRoot.findall('update'):\n currentUpdate = update_tag.get('id')\n releaseDate = update_tag.get('released')\n # To get the top level Dictionary seeded with the hot fix Name and release date.\n if releaseDate is not None:\n if currentUpdate in fullReport:\n if debug:\n print(\"WARNING! Hot Fix \" + currentUpdate + \" already discovered. Skipping\")\n updateID = \"DUPLICATE-SKIP\"\n else:\n # The SCXXXX hot fixes are special. The package files are only included in\n # Viya_<version>_<platform>_home.xml files. 
So, the entries in the\n # scheduled_update_<platform>_<shipevent>.xml files can be skipped.\n if currentUpdate.startswith(\"SC\") and currentFile.find(\"scheduled_update_\") < 0:\n continue\n updateID = currentUpdate\n fullReport[updateID] = {}\n fullReport[updateID][\"release_date\"] = releaseDate\n fullReport[updateID][\"installed\"] = False\n fullReport[updateID][\"upToDate\"] = False\n # To get the SASNote information under the hot fix\n else:\n if updateID == \"DUPLICATE-SKIP\":\n continue\n sasNote = update_tag.get('sasnote')\n sasNoteTitle = update_tag.get('sasnoteTitle')\n if sasNote is not None:\n if \"sasnote\" not in fullReport[updateID]:\n fullReport[updateID][\"sasnote\"] = {}\n # This string needs to be encoded because some non-ASCII characters are\n # in some of the titles.\n fullReport[updateID][\"sasnote\"][sasNote] = sasNoteTitle.encode('utf-8')\n # To get the Package information under the hot fix.\n else:\n os = update_tag.get(\"os\")\n fullPackage = update_tag.get(\"package\")\n if fullPackage is not None:\n if \"package\" not in fullReport[updateID]:\n fullReport[updateID][\"package\"] = {}\n\n lastPeriodIndex = fullPackage.rfind(\".\")\n # Format the package information.\n # Windows does not have a dash in the version; Linux does. So, we need to break differently,\n # depending on the OS.\n if os.lower().find(\"windows\") > -1:\n versionStartIndex = fullPackage.rfind(\"-\")\n achitectureStartIndex = -1\n versionEndIndex = lastPeriodIndex\n osFamily = \"Windows\"\n else:\n versionStartIndex = fullPackage.rfind(\"-\", 0, fullPackage.rfind(\"-\"))\n # Linux has architecture in the package. This will be stored in its own key.\n achitectureStartIndex = fullPackage.rfind(\".\", 0, lastPeriodIndex)\n # SLES has the string 'suse' in its package. 
This will strip it out (as well as an extra .).\n if os.lower().find(\"suse\") > -1:\n versionEndIndex = achitectureStartIndex - 5\n osFamily = \"Suse\"\n else:\n if os.lower().find(\"yocto\") > -1:\n versionEndIndex = achitectureStartIndex - 6\n osFamily = \"Yocto\"\n else:\n if os.lower().find(\"ubuntu\") > -1:\n versionStartIndex = fullPackage.rfind(\"_\", 0, fullPackage.rfind(\"_\"))\n versionEndIndex = fullPackage.rfind(\"_\")\n achitectureStartIndex = versionEndIndex\n osFamily = \"Ubuntu\"\n else:\n if os.lower().find(\"red hat enterprise linux 7\") > -1:\n versionStartIndex = versionStartIndex = fullPackage.rfind(\":\")\n versionEndIndex = len(fullPackage)\n achitectureStartIndex = -1\n osFamily = \"RedHat\"\n else:\n versionEndIndex = achitectureStartIndex\n osFamily = \"RedHat\"\n package = fullPackage[:versionStartIndex]\n packageVersion = fullPackage[versionStartIndex + 1:versionEndIndex]\n architecture = fullPackage[achitectureStartIndex + 1:lastPeriodIndex]\n\n if package not in fullReport[updateID][\"package\"]:\n fullReport[updateID][\"package\"][package] = {}\n if \"platform\" not in fullReport[updateID][\"package\"][package]:\n fullReport[updateID][\"package\"][package][\"platform\"] = {}\n if osFamily not in fullReport[updateID][\"package\"][package][\"platform\"]:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily] = {}\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"version\"] = packageVersion\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installed\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"upToDate\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"os\"] = os\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installedVersions\"] = {}\n if achitectureStartIndex != -1:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"arch\"] = architecture\n # This property is used to make sure that when evaluating the installed packages,\n # the upToDate=false does not get overridden by a True at the end.\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"alreadyUpdated\"] = False\n\n # Add to the package to hot fix dict.\n if package not in packageToHotfix:\n packageToHotfix[package] = []\n packageToHotfix[package].append([osFamily, updateID])\n\n except ET.ParseError:\n if debug:\n print(\"***** Error parsing \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping file.\\n\\n\")\n except urllib2.HTTPError:\n if debug:\n print(\"***** Cannot access \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n except:\n if debug:\n print(\"***** Error encountered with \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n\n if debug:\n print(\"**** Build complete. 
Here are the hot fixes:\")\n print_Full_Report(fullReport)\n print(\"***********************************************************************************\")\n print(\"**** Here is the package to hot fix dict:\")\n print(\"***********************************************************************************\")\n for current_package in packageToHotfix:\n print(\" \" + current_package)\n for machine_list in packageToHotfix[current_package]:\n print(\" \" + machine_list[0] + \" @ \" + machine_list[1] + \".\")\n print(\"***********************************************************************************\")\n print(\"Report built.\")\n print(\"Accessing environment Data.\")\n\n for currentMachine in results['sas_hosts']:\n if not results['sas_hosts'][currentMachine][\"_unreachable\"] and not results['sas_hosts'][currentMachine][\"_failed\"]:\n currentOS = results['sas_hosts'][currentMachine]['os']['family']\n for currentPackage in results['sas_hosts'][currentMachine]['sas_packages']:\n if currentPackage in packageToHotfix:\n for osHotfix in packageToHotfix[currentPackage]:\n if osHotfix[0] == currentOS:\n currentHotfix = osHotfix[1]\n installedVersion = \\\n results['sas_hosts'][currentMachine]['sas_packages'][currentPackage]['attributes']['version']\n if installedVersion.endswith('.suse'):\n installedVersion = installedVersion[:-5]\n else:\n if installedVersion.endswith('.yocto'):\n installedVersion = installedVersion[:-6]\n else:\n if '_' in installedVersion:\n installedVersion = installedVersion[0:installedVersion.rfind(\"_\")]\n hotfixVersion = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n upToDate = compare_versions(installedVersion, hotfixVersion) >= 0\n fullReport[currentHotfix][\"installed\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"] = True\n # If a previous pacakage marked updateToDate=True, it can still be pulled back to false if another package isn't\n # up to date. 
If the previous package was marked upToDate=false, the hotfix cannot be marked true.\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] or \\\n (fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] and\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"]):\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"] = upToDate\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentMachine] = [installedVersion, upToDate]\n\n if debug:\n print(\"Comparing evironment data to hotfix data.\")\n for currentHotFix in fullReport:\n cumulativeOverallUpToDate = True\n # This will only allow the top level \"upToDate\" property to be set, if there is a package installed on this OS.\n allowTopLevelUpdate = False\n for currentPackage in fullReport[currentHotFix][\"package\"]:\n cumulativeOSUpToDate = True\n for currentOS in fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"]:\n if len(fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]) > 0:\n cumulativeOSUpToDate = cumulativeOSUpToDate and \\\n fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\n \"upToDate\"]\n allowTopLevelUpdate = True\n\n cumulativeOverallUpToDate = cumulativeOverallUpToDate and cumulativeOSUpToDate\n if allowTopLevelUpdate:\n fullReport[currentHotFix][\"upToDate\"] = cumulativeOverallUpToDate\n\n # Now that the fullReport has been updated, go back and add to results, for the final report.\n results[\"available_hotfixes\"] = {}\n results[\"installed_hotfixes\"] = {}\n\n for currentHotfix in fullReport:\n if not fullReport[currentHotfix][\"installed\"]:\n continue\n if fullReport[currentHotfix][\"upToDate\"]:\n hotfix_dict_to_use = \"installed_hotfixes\"\n else:\n hotfix_dict_to_use = \"available_hotfixes\"\n results[hotfix_dict_to_use][currentHotfix] = {}\n results[hotfix_dict_to_use][currentHotfix][\"release_date\"] = fullReport[currentHotfix][\"release_date\"]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"] = []\n for currentPackage in fullReport[currentHotfix][\"package\"]:\n for currentOS in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"]:\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"]:\n continue\n for currentHost in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]:\n temp_dict = {}\n temp_dict[\"hostname\"] = currentHost\n temp_dict[\"package\"] = currentPackage\n temp_dict[\"installed_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][0]\n temp_dict[\"hotfix_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n temp_dict[\"up_to_date\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][1]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"].append(temp_dict)\n # Format the SAS Note description so that we can respect any HTML tags that are included in the text.\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"] = {}\n for current_number 
in fullReport[currentHotfix][\"sasnote\"]:\n # Honor any html that is coming through.\n temp_sasnote_description = fullReport[currentHotfix][\"sasnote\"][current_number]\n temp_sasnote_description = temp_sasnote_description.replace(\"&lt;\", \"<\")\n temp_sasnote_description = temp_sasnote_description.replace(\"&gt;\", \">\")\n # Build a link to the URL for the SAS Note.\n hot_fix_prefix = current_number[:2]\n hot_fix_postfix = current_number[2:]\n sas_note_url = \"http://support.sas.com/kb/\" + hot_fix_prefix + \"/\" + hot_fix_postfix + \".html\"\n sas_note_html_link = \"<a href=\\\"\" + sas_note_url + \"\\\"\\>\" + current_number + \"</a>\"\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"][current_number] = {\"sas_note_link\":sas_note_html_link, \"description\":temp_sasnote_description}\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n #\n # changed will always be 'False' since we'll never alter state on a host\n module.exit_json(changed=False, processed_host_details=results)", "def create_current_host_dict_playbook(self):\n\n host_dict = {\n 'no_access_hosts': self.module.params['no_access_hosts'],\n 'read_only_hosts': self.module.params['read_only_hosts'],\n 'read_only_root_hosts': self.module.params[\n 'read_only_root_hosts'],\n 'read_write_hosts': self.module.params['read_write_hosts'],\n 'read_write_root_hosts': self.module.params[\n 'read_write_root_hosts']\n }\n return host_dict", "def get_hostsdata_from_hostsfile(hosts_file) -> dict:\n\n if not os.path.isfile(hosts_file):\n logger.error(f\"Suzieq inventory {hosts_file} must be a file\")\n print(f\"ERROR: Suzieq inventory {hosts_file} must be a file\")\n sys.exit(1)\n\n if not os.access(hosts_file, os.R_OK):\n logger.error(\"Suzieq inventory file is not readable: {}\", hosts_file)\n print(\"ERROR: hosts Suzieq inventory file is not readable: {}\",\n hosts_file)\n sys.exit(1)\n\n with open(hosts_file, \"r\") as f:\n try:\n data = f.read()\n hostsconf = yaml.safe_load(data)\n except Exception as e:\n logger.error(\"Invalid Suzieq inventory file:{}\", e)\n print(\"Invalid Suzieq inventory file:{}\", e)\n sys.exit(1)\n\n if not hostsconf or isinstance(hostsconf, str):\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n if not isinstance(hostsconf, list):\n if '_meta' in hostsconf.keys():\n logger.error(\"Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n print(\"ERROR: Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n else:\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n for conf in hostsconf:\n if any(x not in conf.keys() for x in ['namespace', 'hosts']):\n logger.error(\"Invalid inventory:{}, no namespace/hosts sections\")\n print(\"ERROR: Invalid inventory:{}, no namespace/hosts sections\")\n sys.exit(1)\n\n return hostsconf", "def get_facts_and_features(content):\n facts_features = content.find(\"div\", {\"class\": \"ds-home-facts-and-features reso-facts-features sheety-facts-features\"})\n items_list = [li.get_text(strip=True) for uls in facts_features.find_all(\"ul\") for li in uls]\n item_keys = ['_'.join(parse_text(item, ':', 0).split()).lower() for item in items_list]\n item_values = [parse_text(item, ':', -1) for item in 
items_list]\n\n return dict(zip(item_keys, item_values))", "def transform(data):\n structured = {}\n\n structured[\"ansible_port\"] = data[\"Port\"]\n structured[\"ansible_host\"] = data[\"HostName\"]\n structured[\"ansible_user\"] = data[\"User\"]\n structured[\"ansible_private_key_file\"] = data[\"IdentityFile\"]\n\n structured[\"ansible_ssh_common_args\"] = \" \".join(\n [\n \"-o StrictHostKeyChecking=no\",\n \"-o UserKnownHostsFile=/dev/null\",\n \"-o ControlMaster=auto\",\n \"-o ControlPersist=30m\",\n \"-o ConnectionAttempts=100\",\n ]\n )\n\n return structured", "def ansible_inventory(self):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n res = runner.get_inventory()\n resp = []\n for k,v in res.items():\n resp.append({u'group':k, u'hosts':u', '.join(v)})\n self.logger.debug(u'Ansible inventory nodes: %s' % res)\n self.result(resp, headers=[u'group', u'hosts'])", "def format_content(content):\n element = {} # dictionary to hold the class\n result = { 'ansible_facts': {} } # the result is a dictionary with one element called 'ansible_facts'\n content = json.loads(content)[\"imdata\"] # remove the IMDATA wrapper\n for item in content: # content is a *list* of one or more elements returned for the class query\n d_item = dict(item)\n aci_class = d_item.keys()[0] # get the name of the class we queried\n try:\n element[aci_class]\n except KeyError:\n element[aci_class] = [] # each returned MO is a list element\n\n attributes = d_item[aci_class][\"attributes\"]\n element[aci_class].append(attributes) # append the MO to our class dictionary\n\n result[\"ansible_facts\"] = element\n return result", "def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))", "def get_graph_facts(duthost, localhost, host_name):\n conn_graph_facts = dict()\n base_path = os.path.dirname(os.path.realpath(__file__))\n # yaml file contains mapping from inventory file name to its corresponding graph file\n inv_mapping_file = os.path.join(base_path, \"../../../ansible/group_vars/all/inv_mapping.yml\")\n if os.path.exists(inv_mapping_file):\n with open(inv_mapping_file) as fd:\n inv_map = yaml.load(fd, Loader=yaml.FullLoader)\n inv_opt = duthost.host.options['inventory']\n inv_files = []\n if isinstance(inv_opt, str):\n inv_files = [duthost.host.options['inventory']] # Make it iterable for later use\n elif isinstance(inv_opt, list) or isinstance(inv_opt, tuple):\n inv_files = duthost.host.options['inventory']\n\n for inv_file in inv_files:\n inv_file = os.path.basename(inv_file)\n\n # Loop through the list of inventory files supplied in --inventory argument.\n # For the first inventory file that has a mapping in inv_mapping.yml, return\n # its conn_graph_facts.\n if inv_map and inv_file in inv_map:\n lab_conn_graph_file = os.path.join(base_path, \"../../../ansible/files/{}\".format(inv_map[inv_file]))\n conn_graph_facts = localhost.conn_graph_facts(host=host_name, filename=lab_conn_graph_file)['ansible_facts']\n return 
conn_graph_facts\n return conn_graph_facts", "def repr_for_yaml(self, what=None, resolve=False):\n retVal = list()\n all_iids = self.items_table.get_all_iids()\n all_vars = sorted(config_vars.keys())\n if what is None: # None is all\n what = all_vars + all_iids\n\n defines = OrderedDict()\n indexes = OrderedDict()\n unknowns = list()\n for identifier in what:\n if identifier in all_vars:\n defines.update({identifier: config_vars.repr_var_for_yaml(identifier)})\n elif identifier in all_iids:\n indexes.update({identifier: self.items_table.repr_item_for_yaml(identifier, resolve=resolve)})\n else:\n unknowns.append(aYaml.YamlDumpWrap(value=\"UNKNOWN VARIABLE\",\n comment=identifier + \" is not in variable list\"))\n if defines:\n retVal.append(aYaml.YamlDumpDocWrap(defines, '!define', \"Definitions\",\n explicit_start=True, sort_mappings=True))\n if indexes:\n retVal.append(\n aYaml.YamlDumpDocWrap(indexes, '!index', \"Installation index\",\n explicit_start=True, sort_mappings=True))\n if unknowns:\n retVal.append(\n aYaml.YamlDumpDocWrap(unknowns, '!unknowns', \"Installation index\",\n explicit_start=True, sort_mappings=True))\n\n return retVal", "def fact():\n result = []\n\n interfaces = [SCNetworkInterfaceGetBSDName(i) for i in SCNetworkInterfaceCopyAll()]\n\n for i in interfaces:\n try:\n active = subprocess.check_output(\n [\"/usr/sbin/ipconfig\", \"getifaddr\", i]\n ).strip()\n if active:\n result.append(i)\n except subprocess.CalledProcessError:\n continue\n\n return {factoid: result}", "def _find_facts_for_namespace(system, fact_namespace):\n # TODO: we are assuming we just need to handle one namespace\n for facts in system['facts']:\n if facts['namespace'] == fact_namespace:\n dataframe = json_normalize(_flatten_list_facts(facts['facts']), sep='.')\n # TODO: we should transform this in PUP, not here\n dataframe = dataframe.replace({True: \"enabled\", False: \"disabled\"})\n return dataframe.to_dict(orient='records')[0]\n return {}", "def describe_collect(self):\n logger.info(\"describe_collect()\")\n return {self.name: {}}", "def _describe(self) -> Dict[str, Any]:\n return {\n \"run_id\": self._run_id,\n \"prefix\": self._prefix,\n }", "def describe(self):\n return {\n 'movie': {'id': self.movie_id, 'title': self.movies.title},\n 'actor': {'id': self.actor_id, 'name': self.actors.name}\n }", "def get_alerts(node: CephAdmin) -> dict:\n cmd = \"ceph health detail\"\n all_alerts = {}\n out, err = node.shell([cmd])\n regex = r\"(\\(MUTED[\\w\\s,-]*\\))?\\s*\\[\\w{3}\\]\\s([\\w_]*):\"\n alerts = re.findall(regex, out)\n all_alerts[\"active_alerts\"] = [alert[1] for alert in alerts if not alert[0]]\n all_alerts[\"muted_alerts\"] = [alert[1] for alert in alerts if alert[0]]\n return all_alerts", "def generate_inventory(baremetal_info, server_info):\n\n hosts = defaultdict(list)\n hosts_meta = {}\n\n for node in baremetal_info:\n if node['Provisioning State'].lower() == 'active':\n role = re.findall('.*profile:(compute|control)', node['Properties']['capabilities'])[0]\n for server in server_info:\n if server['ID'] == node['Instance UUID']:\n node_ip = re.findall('.+=(\\d+.\\d+.\\d+.\\d+)$', server['Networks'])[0]\n hosts[role].append(node_ip)\n # To match ssh.cfg.j2 template\n hosts_meta[node_ip] = {'ansible_ssh_host': node_ip,\n 'ansible_user': 'heat-admin'}\n\n for host in hosts:\n hosts[host].sort()\n\n return {'hosts': hosts, 'hosts_meta': hosts_meta}", "def _get_vm_ids_and_names_dict(self):\r\n vm_ids = {}\r\n vm_names = {}\r\n\r\n for content in self.content:\r\n if 
content['type'].lower() in ('vm', 'virtual machine'):\r\n vm_ids[content['id']] = content['display_name']\r\n vm_names[content['display_name']] = content['id']\r\n\r\n return vm_ids, vm_names", "def facts(self, query=None, **kwargs):\n q = EqualsOperator(\"certname\", self.name)\n if query:\n q = AndOperator()\n q.add(EqualsOperator(\"certname\", self.name))\n q.add(query)\n\n return self.__api.facts(query=q, **kwargs)", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder" ]
[ "0.72143894", "0.6945958", "0.6938341", "0.6432432", "0.62723744", "0.61998487", "0.6169367", "0.61409837", "0.60772204", "0.59841335", "0.5809634", "0.57634574", "0.5564063", "0.55350655", "0.5498731", "0.5497586", "0.5489451", "0.5477204", "0.54573256", "0.54452604", "0.5384291", "0.53729296", "0.53550607", "0.53521144", "0.53513813", "0.5344901", "0.5318632", "0.5312083", "0.5301547", "0.52548736" ]
0.75194734
0
Load environment or sourced credentials. If the credentials are specified in either environment variables or in a credential file, the sourced variables will be loaded if they are not already set within ``module.params``.
def _env_vars(self, cred_file=None, section='default'):
    if cred_file:
        parser = ConfigParser.SafeConfigParser()
        parser.optionxform = str
        parser.read(os.path.expanduser(cred_file))
        for name, value in parser.items(section):
            if name == 'OS_AUTH_URL':
                if not self.module.params.get('login_url'):
                    self.module.params['login_url'] = value
            if name == 'OS_USERNAME':
                if not self.module.params.get('login_user'):
                    self.module.params['login_user'] = value
            if name == 'OS_PASSWORD':
                if not self.module.params.get('login_password'):
                    self.module.params['login_password'] = value
            if name == 'OS_TENANT_ID':
                if not self.module.params.get('login_tenant_name'):
                    self.module.params['login_tenant_name'] = value
    else:
        if not self.module.params.get('login_url'):
            authurl = os.getenv('OS_AUTH_URL')
            self.module.params['login_url'] = authurl
        if not self.module.params.get('login_user'):
            username = os.getenv('OS_USERNAME')
            self.module.params['login_user'] = username
        if not self.module.params.get('login_password'):
            password = os.getenv('OS_PASSWORD')
            self.module.params['login_password'] = password
        if not self.module.params.get('login_tenant_name'):
            tenant = os.getenv('OS_TENANT_ID')
            self.module.params['login_tenant_name'] = tenant
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadenv(self):\n logging.debug('Loading OpenStack authentication information from environment')\n # Grab any OS_ found in environment\n for var in os.environ:\n if var[0:3] == 'OS_':\n value = os.environ[var]\n # Don't print out password or token to debug\n if 'PASSWORD' not in var or 'TOKEN' not in var:\n logging.debug('Using %s from environment for %s', value, var)\n self.creds[var[3:].lower()] = value", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def check_for_credential_file(self):\r\n if 'AWS_CREDENTIAL_FILE' in os.environ:\r\n path = os.environ['AWS_CREDENTIAL_FILE']\r\n path = os.path.expanduser(path)\r\n path = os.path.expandvars(path)\r\n if os.path.isfile(path):\r\n fp = open(path)\r\n lines = fp.readlines()\r\n fp.close()\r\n for line in lines:\r\n if line[0] != '#':\r\n if '=' in line:\r\n name, value = line.split('=', 1)\r\n if name.strip() == 'AWSAccessKeyId':\r\n if 'aws_access_key_id' not in self.args:\r\n value = value.strip()\r\n self.args['aws_access_key_id'] = value\r\n elif name.strip() == 'AWSSecretKey':\r\n if 'aws_secret_access_key' not in self.args:\r\n value = value.strip()\r\n self.args['aws_secret_access_key'] = value\r\n else:\r\n print 'Warning: unable to read AWS_CREDENTIAL_FILE'", "def load_creds(self):\n home = expanduser(\"~\")\n with open(os.path.join(home, 'creds.json')) as creds_file:\n self.creds_data = json.load(creds_file)", "def load_credentials(cred=\"credentials_prod.json\"):\n if isinstance(cred, dict):\n # Easy way to handle if a function was handed valid credentials\n pass\n elif isinstance(cred, str):\n with open(cred, 'r') as f:\n cred = json.load(f)\n else:\n raise ValueError(\"Invalid input cred={0}\".format(cred))\n\n # Check for correct entries\n cred_keys = [ \"access_token\", \"expires_in\", \"refresh_token\", \"scope\", \"token_type\"]\n for k in cred_keys:\n if k not in cred:\n raise ValueError(\"Credentials missing key {0}\".format(k))\n return cred", "def _load_credentials(creds_file=None):\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. 
' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def load_credentials(secrets: Secrets = None): # noqa: E501\n secrets = secrets or {}\n service_account_file = secrets.get(\"service_account_file\")\n service_account_info = secrets.get(\"service_account_info\")\n\n if not service_account_file:\n google_app_creds = os.getenv(\n \"GOOGLE_APPLICATION_CREDENTIALS\",\n os.getenv(\"GCP_APPLICATION_CREDENTIALS\"),\n )\n if google_app_creds:\n service_account_file = google_app_creds\n\n credentials = None\n if service_account_file:\n service_account_file = os.path.expanduser(service_account_file)\n if not os.path.exists(service_account_file):\n raise FailedActivity(\n \"GCP account settings not found at {}\".format(\n service_account_file\n )\n )\n\n logger.debug(\n \"Using GCP credentials from file: {}\".format(service_account_file)\n )\n credentials = Credentials.from_service_account_file(\n service_account_file\n )\n elif service_account_info and isinstance(service_account_info, dict):\n logger.debug(\"Using GCP credentials embedded into secrets\")\n credentials = Credentials.from_service_account_info(\n service_account_info\n )\n else:\n raise FailedActivity(\n \"missing GCP credentials settings in secrets of this activity\"\n )\n\n if credentials is not None and credentials.expired:\n logger.debug(\"GCP credentials need to be refreshed as they expired\")\n credentials.refresh(httplib2.Http())\n\n if not credentials:\n raise FailedActivity(\n \"missing a service account to authenticate with the \"\n \"Google Cloud Platform\"\n )\n\n return credentials", "def get_credentials_from_file(credentials_file):\n # Change the scope username and password variables to global\n global username\n global password\n try:\n # Open and reads the credentials.pwd file and save the lines in the username and password\n with open(os.path.dirname(__file__) + credentials_file) as credential_file:\n credentials = credential_file.readlines()\n username = credentials[0].strip()\n password = credentials[1].strip()\n\n credential_file.close()\n except FileNotFoundError as error:\n print(error)\n sys.exit(1)", "def validate_credentials(self, *args, dotenv_path=None, **kwargs):\n if not hasattr(self, \"credentials\"):\n return 
set()\n\n # Load any variables from the .env file into the environment.\n dotenv_path = dotenv_path or os.path.join(\".\", \".env\")\n load_dotenv(dotenv_path)\n\n for credential in self.credentials:\n if credential[\"name\"] not in os.environ:\n raise exceptions.CredentialNotFound(\n f\"Credential {credential['name']!r} missing from environment or .env file.\"\n )\n\n return self.credentials", "def _authenticate_from_file(self, credentials):\n self._gauth.LoadCredentialsFile(credentials)", "def load_credentials(self, credentials_file):\n credentials = ET.parse(credentials_file)\n self.db_host = credentials.find('db_host').text\n self.db_port = credentials.find('db_port').text\n if self.db_port is not None:\n self.db_port = int(self.db_port)\n self.db_user = credentials.find('db_user').text\n self.db_name = credentials.find('db_name').text\n self.db_password = credentials.find('db_password').text", "def import_credentials(password, cred_file):\n\t\tself.exchanges = decrypt(password, cred_file)", "def _load_credentials(self, datasource):\n\n self.credentials = datasource.credentials # Access the credentials\n\n # If there are credentials then make the api call\n if self.credentials:\n self.credentials = yaml.load(self.credentials)\n if self._validate_credentials():\n return self.credentials[\"client_id\"], self.credentials[\"client_secret\"]\n\n raise InvalidOrMissingCredentials(\"client_id and client_secret are missing or invalid\")", "def _load_credential_store(self): \n try:\n return shelve.open(self._store_pathname)\n\n except Exception:\n raise CredentialError('Unable to open credential store: ' + self._store_pathname)", "def cfg_credentials(context):\n arguments = {\n '--config': context.config_file,\n 'authorize': False,\n 'account_summary': False\n }\n pychex_cli = PychexCli(arguments)\n pychex_cli.read_config()\n # Check that the values pulled from the read_config method match what we\n # know\n print(pychex_cli.username)\n assert pychex_cli.username == context.username\n assert pychex_cli.security_image_path == context.security_image_path\n assert pychex_cli.password == context.password\n # Check that the unencrypted values are not present\n with open(arguments['--config']) as cfg:\n cfg_txt = cfg.read()\n assert cfg_txt.find(context.username) == -1\n assert cfg_txt.find(context.security_image_path) == -1\n assert cfg_txt.find(context.password) == -1", "def __get_credentials_from_config(self):\n cr = ConfigFileReader()\n\n self.username = cr.get_value(Config.EDUROAM_USER)\n debug(\"Username set to : \" + self.username)\n self.password = cr.get_value(Config.EDUROAM_PWD)", "def load_configurations(conf_file = CONFIG_FILE, credentials_file = CREDENTIALS_FILE):\n try:\n if not has_credentials(credentials_file):\n raise FileNotFoundError(\"Missing configuration file: run the configuration script secret_wallet_conf\") \n \n if not has_configuration(conf_file):\n raise FileNotFoundError(\"Missing configuration file: run the configuration script secret_wallet_conf\")\n parameters.set_data(get_configuration(conf_file))\n except Exception as e:\n print(e)\n exit(1)", "def _add_cred_variables(self):\n self.credentialKey = {}\n authInfo = None\n if self.client:\n try:\n authInfo = self.client.getAuthenticatorInfo()\n except VersionMethodError:\n pass\n authArgOpts = dict(help=\"authentication plugin\")\n if authInfo:\n self.authenticatorInfo = AuthenticatorInfo(authInfo)\n authArgOpts['choices'] = self.authenticatorInfo.getAuthNames()\n else:\n self.authenticatorInfo = 
LegacyAuthenticatorInfo()\n\n var = self.add_variable('auth', (\"-a\", \"--auth\"), authArgOpts,\n envvar='ICAT_AUTH')\n var.postprocess = _post_auth\n for key in self.authenticatorInfo.getCredentialKeys(hide=False):\n self._add_credential_key(key)\n hidden = self.authenticatorInfo.getCredentialKeys(hide=True)\n if hidden:\n var = self.add_variable('promptPass', (\"-P\", \"--prompt-pass\"), \n dict(help=\"prompt for the password\", \n action='store_const', const=True), \n type=boolean, default=False)\n var.postprocess = _post_promptPass\n for key in hidden:\n self._add_credential_key(key, hide=True)", "def LoadCredentials(json_credentials_path=None, scope_url=None):\n json_credentials_path = FindCredentialsFile(json_credentials_path)\n\n # This is the way to support both service account credentials (JSON generated\n # from Pantheon) or authenticated users (similar to `gcloud auth login`).\n google_creds = oauth2client.client.GoogleCredentials.from_stream(\n json_credentials_path)\n\n if scope_url is None:\n scope_url = DEFAULT_SCOPE_URL\n\n # We need to rescope the credentials which are currently unscoped.\n scoped_creds = google_creds.create_scoped(scope_url)\n return scoped_creds", "def load_credential_file(self, path):\r\n c_data = StringIO.StringIO()\r\n c_data.write(\"[Credentials]\\n\")\r\n for line in open(path, \"r\").readlines():\r\n c_data.write(line.replace(\"AWSAccessKeyId\", \"aws_access_key_id\").replace(\"AWSSecretKey\", \"aws_secret_access_key\"))\r\n c_data.seek(0)\r\n self.readfp(c_data)", "def __init__(self, openrc_file=None, password=None, no_env=False, interactive=False, use_admin=False):\n self.creds = {}\n self.api_version = 2\n # List of accepted keys for Keystone version 2 and 3\n self.auth_keys = {\n 2: ['auth_url', 'username', 'password', 'token', 'user_id', 'trust_id', 'tenant_id', 'tenant_name'],\n 3: ['auth_url', 'username', 'password', 'token', 'token_id', 'user_id', 'user_domain_id',\n 'user_domain_name', 'trust_id', 'domain_id', 'domain_name', 'project_id', 'project_name',\n 'project_domain_id', 'project_domain_name']\n }\n\n # Make sure we have something to load from\n if not openrc_file and no_env:\n raise CredError('No OpenRC file specified and no environment flag set. No credentials to load')\n\n # Load in OpenRC file\n if openrc_file:\n if not os.path.isfile(openrc_file):\n raise CredError('OpenRC file %s not found' % openrc_file)\n self.loadrc(openrc_file)\n\n # Load in environment if no_env is False\n if not no_env:\n self.loadenv()\n\n # Set password if specified\n if password:\n if 'username' in self.creds:\n self.creds['password'] = password\n else:\n self.creds['token'] = password\n\n # Check for required credentials\n if 'auth_url' not in self.creds:\n raise CredError('OS_AUTH_URL is missing from OpenRC file and environment')\n\n # Check for project if admin mode is disabled\n if not use_admin:\n found = False\n for name in ['tenant_name', 'tenant_id', 'project_name', 'project_id']:\n if name in self.creds:\n found = True\n if not found:\n raise CredError('Project information is missing from OpenRC file and environment')\n\n # Warn if no region_name\n if 'region_name' not in self.creds:\n logging.warning('OS_REGION_NAME is missing from OpenRC file and environment. 
May cause issues')\n self.creds['region_name'] = None\n\n # Password is used when there is a username, otherwise it needs a token\n auth_type = 'password'\n if 'username' not in self.creds:\n auth_type = 'token'\n\n if auth_type not in self.creds:\n # Fail out if interactive is false\n if not interactive:\n raise CredError('OS_PASSWORD and OS_TOKEN missing from OpenRC file and environment')\n # Ask user for password / token if we don't have one\n password = ''\n while len(password) == 0:\n ask_str = 'Enter your OpenStack %s for %s on region %s: ' % (auth_type,\n self.creds['auth_url'],\n self.creds['region_name'])\n password = getpass.getpass(ask_str)\n self.creds[auth_type] = password\n\n # Set API version to 3 if needed\n if self.creds['auth_url'][-2:] == 'v3':\n self.api_version = 3", "def get_credentials(env=\"development\") -> dict:\n load_dotenv()\n credentials = {}\n\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"DEV_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"DEV_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"DEV_AWS_REGION\")\n\n if env == \"production\":\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"PROD_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"PROD_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"PROD_AWS_REGION\")\n\n return credentials", "def _load_user_credentials(self, storage):\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n self.client_secrets,\n scope=self.api_scopes,\n message=tools.message_if_missing(self.client_secrets))\n\n # Retrieve credentials from storage.\n # If the credentials don't exist or are invalid run through the installed\n # client flow. 
The storage object will ensure that if successful the good\n # credentials will get written back to file.\n\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage)\n\n return credentials", "def loadrc(self, openrc_file):\n logging.debug('Loading OpenStack authentication information from file %s', openrc_file)\n with open(openrc_file, 'r') as f:\n contents = f.read()\n # Regex to find export OS_****=****\n export_re = re.compile('export OS_([A-Z_]*)=\"?(.*)')\n for line in contents.splitlines():\n line = line.strip()\n mstr = export_re.match(line)\n if mstr:\n # OS_**** is index 1 (only the *'s)\n # after = is index 2\n name = mstr.group(1)\n value = mstr.group(2)\n # Take out ending \"\n if value.endswith('\"'):\n value = value[:-1]\n # Ignore any dynamic values\n if value.startswith('$'):\n continue\n # Don't print out password or token to debug\n if 'PASSWORD' not in name or 'TOKEN' not in name:\n logging.debug('Using %s from OpenRC file for OS_%s', value, name)\n self.creds[name.lower()] = value", "def _use_existing_creds(self, admin):\n # Read the files that have the existing persistent resources\n compute_base_path = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(compute_base_path, 'persistent.resource')\n with open(file_path, 'rb') as f:\n resources = pickle.load(f)\n user = {'name': resources['username'], 'id': resources['user_id']}\n project = {'name': resources['tenant_name'], 'id': resources['tenant_id']}\n user_password = resources['password']\n creds = self.creds_client.get_credentials(user, project, user_password)\n return TestResources(creds)", "def __init__(self, cred_file, yaml_key):\n self.premium_search_args = load_credentials(cred_file,\n yaml_key=yaml_key,\n env_overwrite=False)", "def set_credentials():", "def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }", "def get_credential(host,\n credential_file=DEFAULT_CREDENTIAL_FILE,\n globus_credential_file=DEFAULT_GLOBUS_CREDENTIAL_FILE,\n config_file=DEFAULT_CONFIG_FILE,\n requested_scope=None,\n force_scope_lookup=False,\n match_scope_tag=\"deriva-all\"):\n # load deriva credential set first\n credentials = read_credential(credential_file or DEFAULT_CREDENTIAL_FILE, create_default=True)\n creds = credentials.get(host, credentials.get(host.lower(), dict()))\n\n # load globus credentials and merge, if present\n if globus_credential_file is not None and \\\n os.path.isfile(globus_credential_file) and \\\n os.path.getsize(globus_credential_file) > 10: # Don't load empty json\n try:\n globus_client = GlobusNativeLogin(hosts=[host], config_file=config_file)\n scope_map = globus_client.hosts_to_scope_map(hosts=[host], match_scope_tag=match_scope_tag,\n force_refresh=force_scope_lookup,\n warn_on_discovery_failure=True if not creds else False)\n tokens = globus_client.is_logged_in(exclude_defaults=True)\n if tokens:\n # 1. 
look for the explicitly requested scope in the token store, if specified\n token = globus_client.find_access_token_for_scope(requested_scope, tokens)\n if not token:\n # 2. try to determine the scope to use based on host-to-scope(s) mappings\n token = globus_client.find_access_token_for_host(host,\n scope_map,\n tokens,\n match_scope_tag=match_scope_tag)\n if token:\n creds[\"bearer-token\"] = token\n except Exception as e:\n logging.warning(\"Exception while getting Globus credentials: %s\" % format_exception(e))\n\n return creds or None" ]
[ "0.6508687", "0.6438341", "0.6277127", "0.6266207", "0.6249797", "0.618712", "0.615878", "0.61461294", "0.6112473", "0.6100724", "0.5858952", "0.58386284", "0.57719797", "0.57441735", "0.57082015", "0.5674215", "0.56662875", "0.5662995", "0.5652891", "0.56504303", "0.5598469", "0.5564333", "0.5551846", "0.5543371", "0.55415654", "0.5525458", "0.5503656", "0.5500722", "0.5499678", "0.5497793" ]
0.656147
0
Upload an object to a swift object store.
def _upload(self, variables):
    required_vars = ['container', 'src', 'object']
    variables_dict = self._get_vars(variables, required=required_vars)
    container_name = variables_dict.pop('container')
    object_name = variables_dict.pop('object')
    src_path = variables_dict.pop('src')
    self._create_container(container_name=container_name)
    with open(src_path, 'rb') as f:
        self.swift.put_object(container_name, object_name, contents=f)
    object_data = self.swift.head_object(container_name, object_name)
    self.state_change = True
    return self._facts(facts=[object_data])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _put_object(self, sha: str) -> None:\n data = git.encode_object(sha)\n path = self._object_path(sha)\n self._trace(\"writing: %s\" % path)\n retries = 0\n mode = dropbox.files.WriteMode.overwrite\n\n if len(data) <= CHUNK_SIZE:\n while True:\n try:\n self._connection.files_upload(data, path, mode, strict_conflict=True, mute=True)\n except dropbox.exceptions.InternalServerError:\n self._trace(\"internal server error writing %s, retrying\" % sha)\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise\n else:\n break\n else:\n cursor = dropbox.files.UploadSessionCursor(offset=0)\n done_uploading = False\n\n while not done_uploading:\n try:\n end = cursor.offset + CHUNK_SIZE\n chunk = data[(cursor.offset) : end]\n\n if cursor.offset == 0:\n # upload first chunk\n result = self._connection.files_upload_session_start(chunk)\n cursor.session_id = result.session_id\n elif end < len(data):\n # upload intermediate chunks\n self._connection.files_upload_session_append_v2(chunk, cursor)\n else:\n # upload the last chunk\n commit_info = dropbox.files.CommitInfo(\n path, mode, strict_conflict=True, mute=True\n )\n self._connection.files_upload_session_finish(chunk, cursor, commit_info)\n done_uploading = True\n\n # advance cursor to next chunk\n cursor.offset = end\n\n except dropbox.files.UploadSessionOffsetError as offset_error:\n self._trace(\"offset error writing %s, retrying\" % sha)\n cursor.offset = offset_error.correct_offset\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise\n except dropbox.exceptions.InternalServerError:\n self._trace(\"internal server error writing %s, retrying\" % sha)\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise", "def put_object(self, account, container, object, content):#put a file to server\n \n pass", "def sendObjectUpload(self, obj, mesh, data, materials):\n editor = self._parent\n data = data.replace(b'MeshSerializer_v1.41', b'MeshSerializer_v1.40')\n\n b64data = base64.urlsafe_b64encode(data).decode('ascii')\n obj_name = obj.name\n obj_uuid = obj.opensim.uuid\n mesh_name = mesh.name\n mesh_uuid = mesh.opensim.uuid\n pos, rot, scale = editor.getObjectProperties(obj)\n \n self.simrt.Create(obj_name, obj_uuid, mesh_name, mesh_uuid,\n editor.unapply_position(obj, pos),\n editor.unapply_rotation(rot),\n editor.unapply_scale(obj, scale), b64data,\n materials)", "def upload(self, bucket, object, filename, mime_type='application/octet-stream'):\n service = self.get_conn()\n media = MediaFileUpload(filename, mime_type)\n response = service \\\n .objects() \\\n .insert(bucket=bucket, name=object, media_body=media) \\\n .execute()", "def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")", "def upload(self, nameObjectDict):\n return self.cpp.upload(nameObjectDict)", "def save(self, obj):\n raise NotImplementedError", "def save(self, obj):", "def save_object(self, object, **kwargs):\n object.save()", "def upload_object(object_location: ObjectLocation, stream: io.BytesIO) -> None:\n s3 = boto3.client(\"s3\")\n result = s3.upload_fileobj(stream, object_location.bucket.name, object_location.key)\n log.debug(f\"Result of upload to {object_location}: {result}\")", "def put(cls, obj):\n pass", "def doRtObjectUpload(self, context, obj):\n editor = self._parent\n mesh = obj.data\n has_mesh_uuid = mesh.opensim.uuid\n b2rexpkg.editor.set_loading_state(obj, 'UPLOADING')\n if has_mesh_uuid:\n def 
finish_clone(materials):\n self.sendObjectClone(obj, materials)\n editor.doExportMaterials(obj, cb=finish_clone)\n return\n def finish_upload(materials):\n def send_upload(data):\n self.sendObjectUpload(obj, mesh, data, materials)\n editor.doAsyncExportMesh(context, obj, send_upload)\n editor.doExportMaterials(obj, cb=finish_upload)\n # export mesh\n # upload prim\n # self.sendObjectUpload(selected, mesh, data)\n # send new prim", "def save_object(self, name: str, object):\n file_path = self.__get_file_path(name)\n self.__serialize_object(file_path, object)", "def upload_obj(bucketname, dateiname, zielname=None):\n pass", "def insert_object(self, object: ObjectHandle):\n # Serialize the object descriptor and data part. Both items are stored\n # as separate objects.\n descriptor, data = self.factory.serialize(object)\n object_id = self.store.write_object(descriptor)\n data_id = self.store.write_object(data)\n # Add the object information to the index and write the modified index\n # to the data store.\n self.index[object.namespace][object.name] = StoredObject(\n object_id=object_id,\n data_id=data_id,\n name=object.name,\n descriptor=descriptor\n )\n self._write_index()\n # If the object refers to a default object that object is removed since\n # it has been overwritten by the new object.\n try:\n del self.defaults.get(object.namespace, {})[object.name]\n except KeyError:\n pass", "def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)", "def add_object(self, object):\n object.save()", "def _upload_file_to_container(self, container, object_key, file_path, connection):\r\n data = open(file_path, 'rb')\r\n logger.debug('upload {file_path} to {container} {object_key}'.format(file_path=file_path, container=container,\r\n object_key=object_key))\r\n\r\n result = connection.object_store.upload_object(container=container,\r\n name=object_key,\r\n data=open(file_path, 'r'))\r\n\r\n logger.debug(result)", "def upload_file_obj_db_s3():\n\n # TODO: upload metadata to database\n temp_engine = create_engine(NEX2_URI)\n session_factory = sessionmaker(\n bind=temp_engine, extension=ZopeTransactionExtension(), expire_on_commit=False)\n db_session = scoped_session(session_factory)\n readme_file_id = None\n file_content_list = file_upload_to_obj()\n try:\n if file_content_list:\n sorted_content = sorted(\n file_content_list, key=itemgetter('file_extension'))\n for item in sorted_content:\n if item['readme_name']:\n readme = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == obj['readme_name']).one_or_none()\n\n if readme is None:\n logging.warning(\n 'unable to find README ' + obj['readme_name'])\n else:\n readme_file_id = readme.dbentity_id\n\n # see if file_meta already exists, else create\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n source_id = db_session.query(Source.source_id).filter(\n Source.display_name == item['source']).one_or_none()[0]\n\n d_name = item['display_name']\n f_ext = item['file_extension']\n temp_file_path = get_file_from_path_collection(f_ext, d_name)\n\n if not existing_file_meta_data:\n try:\n data_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['data_edam_id']).one_or_none()[0]\n\n format_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == 
item['format_edam_id']).one_or_none()[0]\n topic_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['topic_edam_id']).one_or_none()[0]\n item[\"data_id\"] = data_id\n item[\"format_id\"] = format_id\n item[\"topic_id\"] = topic_id\n item[\"source_id\"] = source_id\n item[\"readme_file_id\"] = readme_file_id\n\n except TypeError:\n logging.error(\n 'invalid EDAM id or source in row ' +\n str(row_num) + ' val in ' + item['data_edam_id'] +\n ', ' + item['format_edam_id'] +\n ', ' + item['topic_edam_id'])\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n upload_file_helper(CREATED_BY, remote_file, item)\n\n db_session.flush()\n else:\n existing_file_meta_data.display_name = item['display_name']\n existing_file_meta_data.description = item['description']\n existing_file_meta_data.status = item['status']\n existing_file_meta_data.is_public = item['is_public']\n existing_file_meta_data.is_in_spell = item['is_in_spell']\n existing_file_meta_data.is_in_browser = item['is_in_browser']\n existing_file_meta_data.source_id = source_id\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n #update file size\n if not existing_file_meta_data.file_size and existing_file_meta_data.s3_url:\n remote_file.seek(0, os.SEEK_END)\n file_size = remote_file.tell()\n remote_file.seek(0)\n existing_file_meta_data.file_size = file_size\n\n if item['file_date']:\n existing_file_meta_data.file_date = item['file_date']\n existing_file_meta_data.year = item['file_date'].year\n existing_file_meta_data.readme_file_id = readme_file_id\n remote_file.seek(0, os.SEEK_END)\n\n #transaction.commit()\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n # only upload s3 file if not defined\n if existing_file_meta_data.s3_url is None:\n existing_file_meta_data.upload_file_to_s3(\n remote_file, item['display_name'])\n db_session.flush()\n\n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)", "def store_object(self, _object):\n\n # replace an existing list member, else, append\n\n index = [self.object_store.index(_object_) for _object_ in self.object_store if _object_.LocalID == _object.LocalID]\n\n if index != []:\n\n self.object_store[index[0]] = _object\n\n #if self.settings.LOG_VERBOSE: logger.debug('Updating a stored object: %s in region \\'%s\\'' % (_object.FullID, self.region.SimName))\n\n else:\n\n self.object_store.append(_object)\n\n #if self.settings.LOG_VERBOSE: logger.debug('Stored a new object: %s in region \\'%s\\'' % (_object.LocalID, self.region.SimName))", "def save(self, key, sort_key, _object):\n return self.storage.set(key, sort_key, _object.to_json())", "def put(self, obj):\n\n if obj is None:\n return\n\n assert os.path.exists(obj), f'path {obj} does not exist.'\n\n return shutil.make_archive(obj, 'tar', obj)", "def put_object_as_file(self, ctx):\n req = ctx.req\n\n virtual_path = urllib_parse.unquote(req.path)\n put_location_req = rpc.put_location_request(virtual_path)\n\n request_etag = req.headers.get(\"ETag\", \"\")\n hasher = hashlib.md5()\n wsgi_input = SnoopingInput(req.environ[\"wsgi.input\"], hasher.update)\n\n # TODO: when the upload size is known (i.e. 
Content-Length is set),\n # ask for enough locations up front that we can consume the whole\n # request with only one call to RpcPutLocation(s).\n\n # TODO: ask to validate the path a bit better; if we are putting an\n # object at /v1/a/c/kitten.png/whoops.txt (where kitten.png is a\n # file), we should probably catch that before reading any input so\n # that, if the client sent \"Expect: 100-continue\", we can give them\n # an error early.\n\n physical_path_gen = (\n rpc.parse_put_location_response(\n self.rpc_call(ctx, put_location_req))\n for _ in itertools.repeat(None))\n\n error_response = swift_code.check_object_creation(req)\n if error_response:\n return error_response\n\n # Since this upload can be arbitrarily large, we split it across\n # multiple log segments.\n log_segments = []\n i = 0\n while True:\n # First, make sure there's more data to read from the client. No\n # sense allocating log segments and whatnot if we're not going\n # to use them.\n subinput = LimitedInput(wsgi_input, self.max_log_segment_size)\n if not subinput.has_more_to_read:\n break\n\n # Ask ProxyFS for the next log segment we can use\n phys_path = next(physical_path_gen)\n\n # Set up the subrequest with the bare minimum of useful headers.\n # This lets us avoid headers that will break the PUT immediately\n # (ETag), headers that may complicate GETs of this object\n # (X-Static-Large-Object, X-Object-Manifest), things that will\n # break the GET some time in the future (X-Delete-At,\n # X-Delete-After), and things that take up xattr space for no\n # real gain (user metadata).\n subreq = swob.Request.blank(phys_path)\n subreq.method = 'PUT'\n subreq.environ['wsgi.input'] = subinput\n subreq.headers[\"Transfer-Encoding\"] = \"chunked\"\n\n # This ensures that (a) every subrequest has its own unique\n # txid, and (b) a log search for the txid in the response finds\n # all of the subrequests.\n trans_id = req.headers.get('X-Trans-Id')\n if trans_id:\n subreq.headers['X-Trans-Id'] = trans_id + (\"-%03x\" % i)\n\n # Actually put one chunk of the data into Swift\n subresp = subreq.get_response(self.app)\n if not 200 <= subresp.status_int < 299:\n # Something went wrong; may as well bail out now\n return subresp\n\n log_segments.append((phys_path, subinput.bytes_read))\n i += 1\n\n if should_validate_etag(request_etag) and \\\n hasher.hexdigest() != request_etag:\n return swob.HTTPUnprocessableEntity(request=req)\n\n # All the data is now in Swift; we just have to tell proxyfsd\n # about it. Mung any passed ETags values to include the\n # number of writes to the file (basically, the object's update\n # count) and supply the MD5 hash computed here which becomes\n # object's future ETag value until the object updated.\n obj_metadata = extract_object_metadata_from_headers(req.headers)\n mung_etags(obj_metadata, hasher.hexdigest(), len(log_segments))\n\n put_complete_req = rpc.put_complete_request(\n virtual_path, log_segments, serialize_metadata(obj_metadata))\n try:\n mtime_ns, inode, __writes = rpc.parse_put_complete_response(\n self.rpc_call(ctx, put_complete_req))\n except utils.RpcError as err:\n # We deliberately don't try to clean up our log segments on\n # failure. 
ProxyFS is responsible for cleaning up unreferenced\n # log segments.\n if err.errno == pfs_errno.NotEmptyError:\n return swob.HTTPConflict(\n request=req,\n headers={\"Content-Type\": \"text/plain\"},\n body=\"This is a non-empty directory\")\n elif err.errno == pfs_errno.NotDirError:\n return swob.HTTPConflict(\n request=req,\n headers={\"Content-Type\": \"text/plain\"},\n body=\"Path element is a file, not a directory\")\n else:\n # punt to top-level error handler\n raise\n\n # For reference, an object PUT response to plain Swift looks like:\n # HTTP/1.1 201 Created\n # Last-Modified: Thu, 08 Dec 2016 22:51:13 GMT\n # Content-Length: 0\n # Etag: 9303a8d23189779e71f347032d633327\n # Content-Type: text/html; charset=UTF-8\n # X-Trans-Id: tx7b3e2b88df2f4975a5476-005849e3e0dfw1\n # Date: Thu, 08 Dec 2016 22:51:12 GMT\n #\n # We get Content-Length, X-Trans-Id, and Date for free, but we need\n # to fill in the rest.\n resp_headers = {\n \"Etag\": hasher.hexdigest(),\n \"Content-Type\": guess_content_type(req.path, False),\n \"Last-Modified\": last_modified_from_epoch_ns(mtime_ns)}\n return swob.HTTPCreated(request=req, headers=resp_headers, body=\"\")", "def put_object(self, parent_object, connection_name, **data):\n assert self.access_token, \"Write operations require an access token\"\n return self.request(\n \"{0}/{1}/{2}\".format(self.version, parent_object, connection_name),\n post_args=data,\n method=\"POST\",\n )", "def upload_file(cls, uri, fobj):\n msg = \"Backend doesn't implement upload_file()\"\n raise NotImplementedError(msg)", "def put_object(self, parent_object, connection_name, **data):\n assert self.access_token, \"Write operations require an access token\"\n return self.request(parent_object + \"/\" + connection_name, post_args=data)", "def test_storeObject(self):\n contentStore = ContentStore(store=self.store)\n backendStore = MockContentStore(store=self.store)\n self.store.powerUp(backendStore, IBackendStore)\n backendStore2 = MockContentStore(store=self.store)\n self.store.powerUp(backendStore2, IBackendStore)\n scheduler = MockUploadScheduler()\n self.store.inMemoryPowerUp(scheduler, IUploadScheduler)\n\n contentStore.storeObject(content='somecontent',\n contentType=u'application/octet-stream')\n testObject = self.store.findUnique(ImmutableObject)\n pu = scheduler.uploads\n self.assertEquals(len(pu), 2)\n self.assertEquals(pu[0][0], testObject.objectId)\n self.assertEquals(pu[1][0], testObject.objectId)\n for objectId, backend in pu:\n if backend is backendStore:\n break\n else:\n self.fail('No pending upload for backendStore')\n\n for objectId, backend in pu:\n if backend is backendStore2:\n break\n else:\n self.fail('No pending upload for backendStore2')", "def put_object(local_path: str, file_name: str, configuration):\n pass", "def _save(self, name, content):\n cloud_obj = self.container.create_object(name)\n mimetype, _ = mimetypes.guess_type(name)\n cloud_obj.content_type = mimetype\n cloud_obj.send(content)\n return name", "def test_storeObject(self):\n content = 'blahblah some data blahblah'\n contentType = u'application/octet-stream'\n expectedDigest = u'9aef0e119873bb0aab04e941d8f76daf21dedcd79e2024004766ee3b22ca9862'\n\n d = self.contentStore.storeObject(content, contentType)\n def _cb(oid):\n self.oid = oid\n d.addCallback(_cb)\n self.assertEquals(self.oid, u'sha256:' + expectedDigest)" ]
[ "0.6730462", "0.6696284", "0.66696876", "0.6654185", "0.6636817", "0.6436489", "0.63662285", "0.6357225", "0.63389313", "0.63029313", "0.6297057", "0.6283346", "0.62567437", "0.6248196", "0.62060946", "0.61957824", "0.61772907", "0.6157573", "0.6127972", "0.6127851", "0.6125625", "0.6121919", "0.6078523", "0.6033415", "0.6016252", "0.60074335", "0.5988928", "0.59628063", "0.5959749", "0.59557533" ]
0.67648774
0
Ensure a container exists. If it does not, it will be created.
def _create_container(self, container_name):
    try:
        container = self.swift.head_container(container_name)
    except client.ClientException:
        self.swift.put_container(container_name)
    else:
        return container
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_container_if_missing(container, swift_conn, options):\n try:\n swift_conn.head_container(container)\n except swift_client.ClientException, e:\n if e.http_status == httplib.NOT_FOUND:\n add_container = config.get_option(options,\n 'swift_store_create_container_on_put',\n type='bool', default=False)\n if add_container:\n try:\n swift_conn.put_container(container)\n except ClientException, e:\n msg = _(\"Failed to add container to Swift.\\n\"\n \"Got error from Swift: %(e)s\") % locals()\n raise glance.store.BackendException(msg)\n else:\n msg = (_(\"The container %(container)s does not exist in \"\n \"Swift. Please set the \"\n \"swift_store_create_container_on_put option\"\n \"to add container to Swift automatically.\")\n % locals())\n raise glance.store.BackendException(msg)\n else:\n raise", "def ensure_container():\n return exec_fn(_init_container)", "def test_create_container(self):\n pass", "def put_container(self, container):\n if self.onest.create_bucket(container):\n LOG.debug('put_container, create success. '\n 'Container: %s.', container)\n else:\n # If return false, means exist\n LOG.info(_LI('put_container, '\n 'container(%s) exist, just use it.'), container)", "def _ensure_exists(name, path=None):\n if not exists(name, path=path):\n raise CommandExecutionError(f\"Container '{name}' does not exist\")", "def test_create(set_env, container: Container):\n # pylint: disable=unused-argument\n assert container\n assert isinstance(container, Container)", "def test_container_exists():\n return exec_fn(_test_container_exists)", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def __create_db_container(self):\n self.__check_db_container(mode='running')\n self.__check_db_container(mode='exist')\n\n if self.__is_db_running:\n LOGGER.info('db container ({}) is already up and'\n ' running. Skipping creation step...'.format(self.__db_cont_name))\n self.__remove_create_db()\n pass\n elif self.__is_db_exist and not self.__is_db_running:\n LOGGER.info('db container({}) already exists. 
'\n 'Restarting db container'.format(self.__db_cont_name))\n subprocess.run(['docker', 'restart', self.__db_cont_name])\n time.sleep(10)\n self.__remove_create_db()\n\n else:\n # create the db container\n LOGGER.debug('Creating db container with name {}'.format(self.__db_cont_name))\n arg_port = ['-p', '{}:5432'.format(self.__dbport)]\n arg_name = ['--name', self.__db_cont_name]\n arg_env1 = ['-e', 'POSTGRES_PASSWORD={}'.format(self.__dbpassword)]\n arg_env2 = ['-e', 'POSTGRES_USER={}'.format(self.__dbuser)]\n arg_img = ['-d', self.__db_image]\n command2 = ['docker', 'run'] + arg_port + arg_name + arg_env1 + arg_env2 + arg_img\n try:\n createproc = subprocess.run(command2)\n time.sleep(50)\n self.__remove_create_db()\n except subprocess.CalledProcessError:\n LOGGER.warning('There was an error while attempting creating the db container.')\n raise DockerExecError('There was an error while attempting creating the db container.')", "def create(self, resource):\r\n self._load_resource(resource)\r\n blob_folder = self._blob_folder()\r\n if is_folder(blob_folder):\r\n logger.warning(self._context(\"Container already exists\"))\r\n else:\r\n # create new container\r\n logger.info(self._context(\"Creating container\"))\r\n create_folder(blob_folder)\r\n\r\n self.disconnect()\r\n return is_folder(blob_folder)", "def _create_container(self, finding):\n\n container_dict = {}\n container_dict['name'] = finding['Title']\n container_dict['source_data_identifier'] = finding['Id']\n container_dict['description'] = finding['Description']\n\n container_creation_status, container_creation_msg, container_id = self.save_container(container=container_dict)\n\n if phantom.is_fail(container_creation_status):\n self.debug_print(container_creation_msg)\n self.save_progress('Error while creating container for finding {finding_id}. '\n '{error_message}'.format(finding_id=finding['Id'],\n error_message=container_creation_msg))\n return None\n\n return container_id", "def container_exists(self, id=None, name=None):\n exists = False\n if id and self.container_by_id(id):\n exists = True\n elif name and self.container_by_name(name):\n exists = True\n\n return exists", "def test_create_container_w_null_secret_name(self):\n responses = self.behaviors.create_container_with_secret(\n name='name', secret_name=None)\n secret_resp, container_resp = responses\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='generic',\n num_secrets=1)\n\n # verify the secret's name is returned correctly\n secret_ref = get_resp.entity.secret_refs[0]\n self.assertEqual(secret_ref.name, None)", "def _process_createContainer(self, data):\r\n try:\r\n self._avatar.createContainer(data['containerTag'],\r\n data.get('containerData', {}))\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'CreateContainer' request. 
\"\r\n 'Missing key: {0}'.format(e))", "def ddtest_create_generic_container_w_empty_or_null_name(self, name=None):\n if name is None:\n self._skip_on_issue('launchpad', '1354767')\n\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=container_resp.id, type='generic')", "def _create_container(self, docker_client: \"DockerClient\", **kwargs) -> \"Container\":\n # Create the container with retries on name conflicts (with an incremented idx)\n index = 0\n container = None\n name = original_name = kwargs.pop(\"name\")\n\n while not container:\n from docker.errors import APIError\n\n try:\n display_name = repr(name) if name else \"with auto-generated name\"\n self.logger.info(f\"Creating Docker container {display_name}...\")\n container = docker_client.containers.create(name=name, **kwargs)\n except APIError as exc:\n if \"Conflict\" in str(exc) and \"container name\" in str(exc):\n self.logger.info(\n f\"Docker container name {display_name} already exists; \"\n \"retrying...\"\n )\n index += 1\n name = f\"{original_name}-{index}\"\n else:\n raise\n\n self.logger.info(\n f\"Docker container {container.name!r} has status {container.status!r}\"\n )\n return container", "def create(self):\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']", "def test_get_nonexistant_container(self):\n ref = self.container_client._get_base_url() + '/invalid_uuid'\n get_resp = self.container_client.get_container(ref)\n self.assertEqual(get_resp.status_code, 404)", "def test_add_container(self):\n with DockerHost('host', dind=False) as host:\n # Create a container with --net=none, add a calico interface to\n # it then check felix programs a route.\n node = host.create_workload(\"node\", network=NET_NONE)\n host.calicoctl(\"container add %s 192.168.1.1\" % node)\n\n # Create the profile, get the endpoint IDs for the containers and\n # add the profile to the endpoint so felix will pick it up.\n host.calicoctl(\"profile add TEST_GROUP\")\n ep = host.calicoctl(\"container %s endpoint-id show\" % node)\n host.calicoctl(\"endpoint %s profile set TEST_GROUP\" % ep)\n\n # Wait for felix to program down the route.\n check_route = partial(host.execute,\n \"ip route | grep '192\\.168\\.1\\.1'\")\n retry_until_success(check_route, ex_class=CalledProcessError)", "def __check_db_container(self, mode='running'):\n if mode == 'running':\n cmd_docker = ['docker', 'ps']\n elif mode == 'exist':\n cmd_docker = ['docker', 'ps', '-a']\n else:\n raise DockerExecError('Invalid container check mode: {}.'.format(mode))\n\n\n proc_docker = subprocess.Popen(cmd_docker,\n stdout=subprocess.PIPE)\n proc_grep = subprocess.Popen(['grep', self.__db_cont_name],\n stdin=proc_docker.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc_grep.communicate()\n output = str(stdout).split()\n LOGGER.debug(output)\n try:\n container_image = output[1]\n container_name = output[-1]\n container_port = output[-2]\n # remove new line spacial character\n container_name = container_name.rstrip(\"\\\\n'\")\n container_port = find_xtport(container_port) \n except IndexError:\n container_name = None\n container_image = None\n container_port = None\n \n LOGGER.debug('Found that there is an existing container with the name: 
{}'.format(container_name))\n\n if container_name == self.__db_cont_name:\n if container_image == self.__db_image:\n if mode == 'running':\n self.__is_db_running = True\n elif mode == 'exist':\n self.__is_db_exist = True\n if container_port != self.__dbport:\n LOGGER.warning('Using as external container port: {}'.format(container_port))\n self.__dbport = container_port\n else:\n msg = ('The name \\\"{}\\\" is used by another container.'\n 'Could not create postgres database container.' \n 'Please use other db container name.').format(self.__db_cont_name)\n raise DockerExecError(msg)", "def create_container(self, container_name):\n response = self.client.put_container(container_name)\n return response", "def create_container(ContainerName=None, Tags=None):\n pass", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def test_rackspace_uploader_creates_container(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.get_container.side_effect = NoSuchContainer\r\n mycf.create_container.return_value = True\r\n mycf.make_container_public.return_value = True\r\n u = RackspaceUploader()\r\n res = u.init_app(self.flask_app)\r\n err_msg = \"Init app should return the container.\"\r\n assert res is True, err_msg", "def post(self, run=False, **container_dict):\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n policy.enforce(context, \"container:create\",\n action=\"container:create\")\n\n try:\n run = strutils.bool_from_string(run, strict=True)\n except ValueError:\n msg = _('Valid run values are true, false, 0, 1, yes and no')\n raise exception.InvalidValue(msg)\n try:\n container_dict['tty'] = strutils.bool_from_string(\n container_dict.get('tty', False), strict=True)\n container_dict['stdin_open'] = strutils.bool_from_string(\n container_dict.get('stdin_open', False), strict=True)\n except ValueError:\n msg = _('Valid tty and stdin_open values are ''true'', '\n '\"false\", True, False, \"True\" and \"False\"')\n raise exception.InvalidValue(msg)\n\n # NOTE(mkrai): Intent here is to check the existence of 
image\n # before proceeding to create container. If image is not found,\n # container create will fail with 400 status.\n images = compute_api.image_search(context, container_dict['image'],\n True)\n if not images:\n raise exception.ImageNotFound(container_dict['image'])\n container_dict['project_id'] = context.project_id\n container_dict['user_id'] = context.user_id\n name = container_dict.get('name') or \\\n self._generate_name_for_container()\n container_dict['name'] = name\n if container_dict.get('memory'):\n container_dict['memory'] = \\\n str(container_dict['memory']) + 'M'\n if container_dict.get('restart_policy'):\n self._check_for_restart_policy(container_dict)\n container_dict['status'] = fields.ContainerStatus.CREATING\n new_container = objects.Container(context, **container_dict)\n new_container.create(context)\n\n if run:\n compute_api.container_run(context, new_container)\n else:\n compute_api.container_create(context, new_container)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('containers',\n new_container.uuid)\n pecan.response.status = 202\n return view.format_container(pecan.request.host_url, new_container)", "def ddtest_create_container_w_secret_name(self, name=None):\n # create a container with a particular secret name\n responses = self.behaviors.create_container_with_secret(\n name='name', secret_name=name)\n secret_resp, container_resp = responses\n self._check_container_create_response(container_resp)\n\n # verify the container exists with the expected data\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='generic',\n num_secrets=1)\n\n # verify the secret's name is returned correctly\n secret_ref = get_resp.entity.secret_refs[0]\n self.assertEqual(secret_ref.name, name)", "def test_get_container(self):\n pass", "def create_containers(self, containers, script, arguments):\n try:\n self.verify_execution_status()\n except Exception:\n self.log_stack_trace(traceback.format_exc())\n self.log_message(\"ERROR: Could not verify execution mode status.\")\n return\n\n more_than_one = len(containers) > 1\n created_containers = []\n for container in containers:\n my_script = os.path.join(container.directory, script)\n try:\n container.create(my_script, arguments, more_than_one)\n created_containers.append(container)\n except Exception:\n self.log_message(\n f\"ERROR: Could not create container {container.name}\"\n f\" with image {container.image}\"\n )\n self.log_stack_trace(traceback.format_exc())\n\n # Failing to create a container is a critical error.\n # Try to clean up any containers we successfully created, then raise.\n for c in created_containers:\n try:\n c.cleanup_container()\n except Exception:\n pass\n raise", "def _register_container(self, container):\n found = False\n try:\n for host, location, container in Container.Container.host_generator(container,\n known_networks=self.networks.keys()):\n websocket = \"ws\" in host.scheme or \"wss\" in host.scheme\n secured = 'https' in host.scheme or 'wss' in host.scheme\n http = 'http' in host.scheme or 'https' in host.scheme\n # it might return string if there's a error in processing\n if type(host) is not str:\n if (host.hostname, host.port) in self.hosts:\n existing_host: Host = self.hosts[(host.hostname, host.port)]\n existing_host.add_container(location, container, websocket=websocket, http=http)\n ## if any of the containers in for the virtualHost require https, the all others will 
be redirected to https.\n if secured:\n existing_host.secured = True\n host = existing_host\n else:\n host.secured = secured\n host.add_container(location, container, websocket=websocket, http=http)\n self.hosts[(host.hostname, host.port)] = host\n\n if host.secured:\n if host.hostname not in self.ssl_certificates:\n host.ssl_expiry = self.ssl.expiry_time(host.hostname)\n else:\n host.ssl_expiry = self.ssl_certificates[host.hostname]\n if (host.ssl_expiry - datetime.datetime.now()).days > 2:\n self.ssl_certificates[host.hostname] = host.ssl_expiry\n\n found = True\n self.containers.add(container.id)\n\n except Container.NoHostConiguration:\n print(\"Skip Container:\", \"No VIRTUAL_HOST configuration\", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n except Container.UnreachableNetwork:\n print(\"Skip Container:\", \"UNREACHABLE Network \", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n return found", "def __create_cont(self, path, filesystem, cont_stat, component_number):\n try:\n self.logger.debug('Create container interface called')\n status_obj = Status()\n cont_id = \"container\"\n #cont_id = get_container_id()\n tmp_path = '%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, TMPDIR, cont_id,component_number)\n self.asyn_helper.call(\"create_container\", \\\n tmp_path, path, cont_stat, status_obj)\n return status_obj\n except Exception as err:\n self.logger.error(('create_container for %(con_dir)s failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path, \n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err" ]
[ "0.73393464", "0.71082866", "0.6982969", "0.69364905", "0.6810107", "0.6559245", "0.65530443", "0.63620865", "0.6323742", "0.62845564", "0.6264618", "0.6248145", "0.6152857", "0.61038315", "0.60116714", "0.5990125", "0.59747225", "0.59611005", "0.5951962", "0.59108293", "0.5903832", "0.5888574", "0.5887081", "0.587551", "0.58689153", "0.58454114", "0.5844283", "0.58213496", "0.5819696", "0.58092415" ]
0.72043264
1
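A note on the retrieved document above: it relies on an instance attribute `self.swift` and the `ClientException` class from python-swiftclient, following the common "HEAD the container, PUT it on failure" idiom. Below is a small self-contained sketch of that same idiom for illustration only; the connection endpoint, credentials, and container name are placeholders, not values taken from this dataset.

from swiftclient.client import Connection
from swiftclient.exceptions import ClientException

def ensure_container(conn, container_name):
    # HEAD the container first; a missing container raises ClientException (404).
    try:
        return conn.head_container(container_name)
    except ClientException:
        # Create it, then return its headers so callers always get a container back.
        conn.put_container(container_name)
        return conn.head_container(container_name)

if __name__ == "__main__":
    conn = Connection(
        authurl="http://swift.example.com/auth/v1.0",  # placeholder endpoint
        user="account:user",                           # placeholder credentials
        key="secret",
    )
    print(ensure_container(conn, "my-container"))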
Build a dictionary of numbers from zero to fifteen and the hexadecimal equivalent
def DictFunction2():
    print "Create Second Dictionary"
    NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))
    print NumberDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex_probabilities(self):\n return {hex(key): value for key, value in self.items()}", "def int2hex(n: int) -> str:", "def test_int_to_hex():\n hex_values = ['61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',\n '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f']\n index = 0\n for x in range(97, 123):\n assert pi_finder.int_to_hex(x, hex_dict) == hex_values[index]\n index += 1", "def generarDiccionario():\n return {\n 'a' : '#ffff00',\n 'c' : '#00ffff',\n 'd' : '#ffc90e',\n 'm' : '#800080',\n 'r' : '#ff0000',\n 'v' : '#00bb00'\n }", "def create_dictionary():\n chars = sorted(ch for ch in string.printable if ch not in (\"\\x0b\", \"\\x0c\", \"\\r\"))\n char2id = dict((ch, i + 1) for i, ch in enumerate(chars))\n char2id.update({\"\": 0})\n id2char = dict((char2id[ch], ch) for ch in char2id)\n vocab_size = len(char2id)\n id2char.update({98:'\\\\unk',99:'\\\\unk'})\n return char2id, id2char, vocab_size,chars", "def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)", "def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))", "def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a", "def fn(c):\n ans = 0\n for k in range(1, 16): \n ans = min(ans, k*16+k, key=lambda x: abs(x - int(c, 16)))\n return hex(ans)[2:].zfill(2)", "def conv_hex(num):\n\n if num < 10:\n return str(num)\n if num == 10:\n return 'A'\n if num == 11:\n return 'B'\n if num == 12:\n return 'C'\n if num == 13:\n return 'D'\n if num == 14:\n return 'E'\n if num == 15:\n return 'F'", "def getFi():\n fi = {}\n for i in range(4):\n for k in range(1,9):\n arg = i+1+(4*(k-1))\n val = (8*i)+k\n if arg <= 32 :\n fi[arg]=val\n return fi", "def hex_calc(value):\r\n hex_dict = { # Dictionary for hex values over 9\r\n 10: \"A\",\r\n 11: \"B\",\r\n 12: \"C\",\r\n 13: \"D\",\r\n 14: \"E\",\r\n 15: \"F\"\r\n }\r\n\r\n hex_stack = deque() # Queue to hold hexidecimal representation\r\n\r\n while value > 0:\r\n remainder = value % 16\r\n if remainder > 9:\r\n remainder = hex_dict[remainder]\r\n hex_stack.append(remainder)\r\n else:\r\n hex_stack.append(remainder)\r\n value = value // 16\r\n\r\n print(\"Hexadecimal Value: \", end=\"\")\r\n while hex_stack:\r\n print(hex_stack.pop(), end=\"\")", "def int_to_hex(num):\n return hex(num)", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys", "def color_hex(x):\n\n quest_hex = {\"choice\": \"FF4530\",\n \"short\": \"FCAA03\",\n \"code\": \"5CB130\"\n }\n\n if x == \"programming\":\n hex_code = quest_hex[\"code\"]\n elif x == \"short_answer\":\n hex_code = quest_hex[\"short\"]\n else:\n hex_code = quest_hex[\"choice\"]\n\n return hex_code", "def _prepare_encryption_table():\n seed = 0x00100001\n crypt_table = {}\n\n for i in range(256):\n index = i\n for j in range(5):\n seed = (seed * 125 + 3) % 0x2AAAAB\n temp1 = (seed & 0xFFFF) << 0x10\n\n seed = (seed * 125 
+ 3) % 0x2AAAAB\n temp2 = (seed & 0xFFFF)\n\n crypt_table[index] = (temp1 | temp2)\n\n index += 0x100\n\n return crypt_table", "def dump( n ):\n\n s = '%x' % n\n if len(s) & 1:\n s = '0' + s\n return s.decode('hex')", "def getT9dict():\r\n T9dict = {}\r\n all_letters = string.lowercase\r\n T9dict.update(mapkeystoletter(2, all_letters[0:3]))\r\n T9dict.update(mapkeystoletter(3, all_letters[3:6]))\r\n T9dict.update(mapkeystoletter(4, all_letters[6:9]))\r\n T9dict.update(mapkeystoletter(5, all_letters[9:12]))\r\n T9dict.update(mapkeystoletter(6, all_letters[12:15]))\r\n T9dict.update(mapkeystoletter(7, all_letters[15:19]))\r\n T9dict.update(mapkeystoletter(8, all_letters[19:22]))\r\n T9dict.update(mapkeystoletter(9, all_letters[22:26]))\r\n T9dict[' '] = 0\r\n\r\n return T9dict", "def decimal_to_hexadecimal(number):\n if number >= 1 and number <= 10: #if the positive integer is less than 10, its binary form is itself\n print(number)\n else:\n \"\"\"\n divide number by 16, take the reminder and start again until the result is 0\n \"\"\"\n new_number = []\n while number > 0:\n new_number.append(int(number%16))\n number = number // 16\n if number == 10: #for number greater than 10, the integer will be represented as hexadecimal element\n number == \"A\"\n elif number == 11:\n number == \"B\"\n elif number == 12:\n number == \"C\"\n elif number == 13:\n number == \"D\"\n elif number == 14:\n number == \"E\"\n elif number == 15:\n number == \"F\"\n print(str(new_number))", "def Hex(num):\n return hex(CInt(num))[2:].upper()", "def string_features_hex(hexstr):\n out = dict([(x,0) for x in hexabet])\n ct = dict(Counter(hexstr.split()))\n N = len(hexstr.split())\n for k in out.keys():\n if k in ct.keys():\n out[k] += ct[k]\n out = [v[1] for v in sorted(out.iteritems(), key=lambda (k,v): k)]\n out = [float(x)/N for x in out]\n return out", "def color_hex(self):\n n = 2\n return tuple(\n hex(int(self.color[i : i + n], 16)) for i in range(0, len(self.color), n)\n )", "def weekday_to_bits(int):\n return {\n 0: 0b1,\n 1: 0b10,\n 2: 0b100,\n 3: 0b1000,\n 4: 0b10000,\n 5: 0b100000,\n 6: 0b1000000\n }[int]", "def create_hex(num):\n\n # Leverage method 2 outlined here: https://www.wikihow.com/Convert-from-Decimal-to-Hexadecimal\n\n hexadecimal = ''\n while num >= 16:\n remainder = num % 16\n num = num // 16\n # Convert the remainder to hex & append to hexadecimal string\n hexadecimal = conv_hex(remainder) + hexadecimal\n # Convert the final quotient to hex & append to hexadecimal string\n hexadecimal = conv_hex(num) + hexadecimal\n\n return hexadecimal", "def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM", "def get_hexa(num: int) -> str:\n return str(hex(num))[2:].upper()", "def _base32_to_hex(base32):\n ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'\n x = 0\n for digit in str(base32.upper().strip(' ')):\n x = x * len(ALPHABET) + ALPHABET.index(digit)\n return hex(x).lstrip('0x').rstrip('L').upper()", "def number_as_bignum_words(number):\n result = []\n while number != 0:\n result.append(\"0x%08x\" % (number & 0xFFFFFFFF))\n number >>= 32\n return \"{ \" + \", \".join(result) + \" }\"", "def _pettifor_numbers():\n return { \"Li\": 0.45,\n \"Be\": 1.5,\n \"B\": 2.0,\n \"C\": 2.5,\n \"N\": 3.0, \n \"O\": 3.5,\n \"F\": 4.0,\n \n \"Na\": 0.4,\n 
\"Mg\": 1.28,\n \"Al\": 1.66,\n \"Si\": 1.92,\n \"P\": 2.18,\n \"S\": 2.44,\n \"Cl\": 2.70,\n \n \"K\": 0.35,\n \"Ca\": 0.60,\n \"Sc\": 0.74,\n \"Ti\": 0.79,\n \"V\": 0.84,\n \"Cr\": 0.89,\n \"Mn\": 0.94,\n \"Fe\": 0.99,\n \"Co\": 1.04,\n \"Ni\": 1.09,\n \"Cu\": 1.20,\n \"Zn\": 1.44,\n \"Ga\": 1.68,\n \"Ge\": 1.92,\n \"As\": 2.16,\n \"Se\": 2.40,\n \"Br\": 2.64,\n\n \"Rb\": 0.30,\n \"Sr\": 0.55,\n \"Y\": 0.70,\n \"Zr\": 0.76,\n \"Nb\": 0.82,\n \"Mo\": 0.88,\n \"Tc\": 0.94,\n \"Ru\": 1.00,\n \"Rh\": 1.06,\n \"Pd\": 1.12,\n \"Ag\": 1.18,\n \"Cd\": 1.36,\n \"In\": 1.60,\n \"Sn\": 1.84,\n \"Sb\": 2.08,\n \"Te\": 2.32,\n \"I\": 2.56,\n \n \"Cs\": 0.25,\n \"Ba\": 0.50,\n \"La\": 0.748,\n \"Hf\": 0.775,\n \"Ta\": 0.83,\n \"W\": 0.885,\n \"Re\": 0.94,\n \"Os\": 0.995,\n \"Ir\": 1.05,\n \"Pt\": 1.105,\n \"Au\": 1.16,\n \"Hg\": 1.32,\n \"Tl\": 1.56,\n \"Pb\": 1.80,\n \"Bi\": 2.04,\n \"Po\": 2.28, \n \"At\": 2.52 }" ]
[ "0.6906895", "0.662318", "0.6495815", "0.62746984", "0.62481356", "0.6218296", "0.6171378", "0.61579126", "0.6128711", "0.6069626", "0.60268325", "0.60072726", "0.5870963", "0.5868089", "0.5852337", "0.580648", "0.5801199", "0.5781645", "0.57676095", "0.57466954", "0.57459706", "0.56859547", "0.56466204", "0.56445086", "0.5627588", "0.5618394", "0.56175464", "0.55808985", "0.5580866", "0.5580684" ]
0.7334854
0
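For readers comparing against current Python, the stored answer above is Python 2 (print statements). A short Python 3 sketch of the same zero-to-fifteen mapping follows; the variable names are illustrative only.

number_to_hex = {i: hex(i) for i in range(16)}             # {0: '0x0', 1: '0x1', ..., 15: '0xf'}
number_to_digit = {i: format(i, 'X') for i in range(16)}   # same mapping without the '0x' prefix

print(number_to_hex)
print(number_to_digit)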
create a few sets with numbers divisible by 2, 3, 4 and test if they're subsets of each other
def SetFunction():
    s2 = []
    s3 = []
    s4 = []
    s2 = { i for i in range(21) if i%2 == 0}
    s3 = { i for i in range(21) if i%3 == 0}
    s4 = { i for i in range(21) if i%4 == 0}
    s2 = set(s2)
    s3 = set(s3)
    s4 = set(s4)
    print s3.issubset(s2)
    print s4.issubset(s2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_example(self):\n\n solution = Solution()\n\n nums = [1, 2, 3]\n\n expected_output = [\n (3,),\n (1,),\n (2,),\n (1, 2, 3),\n (1, 3),\n (2, 3),\n (1, 2),\n ()\n ]\n actual_output = solution.subsets(nums)\n\n for ss in expected_output:\n self.assertIn(ss, actual_output)", "def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"", "def between_two_sets(factors, elements):\n results = []\n for i in xrange(max(factors), min(elements) + 1):\n tally = True\n for factor in factors:\n tally = tally and (i % factor == 0)\n for element in elements:\n tally = tally and (element % i == 0)\n if tally:\n results.append(i)\n return len(results)", "def test(nums: list):\n # check if a subset with more nums than another subset has also a greater sum\n nums.sort()\n\n for i in range(1, int(math.ceil(len(nums)/2))):\n small_nums_sum = sum(nums[:i+1])\n big_nums_sum = sum(nums[-i:])\n if small_nums_sum <= big_nums_sum:\n return False\n\n for subset_len in range(2, len(nums)//2 + 1):\n for subset1 in itertools.combinations(nums, subset_len):\n remaining = copy.deepcopy(nums)\n for num in subset1:\n remaining.remove(num)\n s1 = sum(subset1)\n for subset2 in itertools.combinations(remaining, subset_len):\n s2 = sum(subset2)\n if s1 == s2:\n return False\n return True", "def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False", "def test_only_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], False, True))\n\n self.assertFalse( f( 1, 0, [0], False, True))\n self.assertTrue( f( 1, 1, [0], False, True))\n self.assertTrue( f( 1, 0, [3], False, True))\n self.assertTrue( f( 1, 3, [0], False, True))\n\n self.assertFalse( f( 1, 1, [2], False, True))\n self.assertFalse( f( 1, 1, [3], False, True))\n self.assertFalse( f( 1, 1, [4], False, True))\n\n self.assertTrue( f( 2, 2, [0], False, True))\n self.assertTrue( f( 2, 1, [3], False, True))\n self.assertTrue( f( 2, 3, [3], False, True))\n self.assertTrue( f( 2, 6, [0], False, True))\n self.assertTrue( f( 2, 0, [6], False, True))\n self.assertFalse( f( 2, 4, [3], False, True))\n\n self.assertFalse( f( 3, 1, [], False, True))\n self.assertFalse( f( 3, 2, [], False, True))\n self.assertFalse( f( 3, 0, [3], False, True))\n self.assertFalse( f( 3, 0, [6], False, True))\n self.assertTrue( f( 3, 3, [], False, True))\n self.assertTrue( f( 3, 2, [3], False, True))\n self.assertTrue( f( 3, 3, [6], False, True))\n self.assertTrue( f( 3, 1, [6], False, True))\n self.assertTrue( f( 3, 0, [9], False, True))\n\n self.assertTrue( f(13,13, [], False, True))\n self.assertTrue( f(13,39, [], False, True))\n self.assertTrue( f(13, 0, [39], False, True))\n self.assertTrue( f(13,15, [24], False, True))\n self.assertTrue( f(13,15, [], False, True))\n self.assertTrue( f(13,12, [3], False, True))\n self.assertFalse( f(13,14, [], False, True))\n\n self.assertFalse( f( 6, 1, [3,6,9], False, True))\n self.assertTrue( f( 7, 1, [3,6,9], False, True))\n self.assertFalse( f( 8, 1, [3,6,9], False, True))", "def test_no_petitions(self):\n f = gtrutils.check_petition_combos\n\n 
self.assertTrue( f( 0, 0, [ 0], False, False))\n\n self.assertFalse( f( 1, 0, [], False, False))\n self.assertFalse( f( 1, 1, [2], False, False))\n self.assertFalse( f( 1, 1, [3], False, False))\n self.assertFalse( f( 1, 1, [4], False, False))\n\n self.assertTrue( f( 1, 1, [], False, False))\n self.assertFalse( f( 1, 2, [], False, False))\n self.assertFalse( f( 1, 3, [], False, False))\n\n self.assertFalse( f( 2, 1, [], False, False))\n self.assertTrue( f( 2, 2, [], False, False))\n self.assertFalse( f( 2, 3, [], False, False))\n\n self.assertFalse( f( 3, 1, [], False, False))\n self.assertFalse( f( 3, 2, [], False, False))\n self.assertTrue( f( 3, 3, [], False, False))\n\n self.assertTrue( f(13,13, [], False, False))\n\n self.assertFalse( f( 1, 1, [0,0,0,3], False, False))\n self.assertFalse( f( 2, 1, [0,0,0,3], False, False))\n self.assertFalse( f( 3, 1, [0,0,0,3], False, False))", "def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n if not nums or len(nums) < k:\n return False\n if sum(nums) % k != 0:\n return False\n nums.sort(reverse=True) # 倒排更快\n set_sum = [0] * k\n average_sum = sum(nums) // k\n\n def dfs(index):\n if index == len(nums):\n return True\n for i in range(k):\n set_sum[i] += nums[index]\n if set_sum[i] <= average_sum and dfs(index + 1):\n return True\n set_sum[i] -= nums[index]\n if set_sum[i] == 0: # 如果这个数不符合条件就没必要尝试别的空篮子,速度提高很多\n break\n return False\n\n return dfs(0)", "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n ans, res = [], []\n\n for i in range(2**n, 2**(n+1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n res = [nums[j] for j in range(n) if bitmask[j] == '1']\n if res not in ans:\n ans.append(res)\n\n return ans\n # print(ans)", "def divisors(number: int) -> Set[int]:\n\n if 
number == 0:\n return {0}\n divisor = 2\n while divisor * divisor <= number:\n if number % divisor == 0:\n smaller_result = divisors(number // divisor)\n multiplied_result = {d * divisor for d in smaller_result}\n\n return smaller_result | multiplied_result\n divisor = divisor + 1\n\n return {1, number}", "def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False", "def quick_test():\n if PERIOD < 2:\n return False\n if SIZE % PERIOD != 0:\n return False\n return True", "def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False", "def Tests(): \n\t# Test 1 \n\tS = [2,1,5,7]\n\tt = 4\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 2\n\tS = [2,1,5,7]\n\tt = 6\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\n\t# Test 3\n\tS = [2,1,5,7]\n\tt = 6\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 4\n\tS = [3,2,7,1]\n\tt = 7\n\tk = 1\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 5\n\tS = [3,2,7,1]\n\tt = 4\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 6\n\tS = [3,2,7,1]\n\tt = 4\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 7\n\tS = [2,4,7,8,9]\n\tt = 11\n\tk = 3\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 8\n\tS = [2,4,7,8,9]\n\tt = 11\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\n\t# Test 9\n\tS = [3,6,2,1]\n\tt = 3\n\tk = 2\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")\n\n\t# Test 10\n\tS = [3,6,2,1]\n\tt = 3\n\tk = 1\n\tprint(\"Input: s = \" + str(S) + \", \" + \"t = \" + str(t) + \", \" + \"k = \" + str(k))\n\tprint(\"Output:\", existsSubset(S, t, k), \"\\n\")", "def checkStrictSuperset(a, n):\n for i in range(n):\n b = set(map(int, input().split()))\n if not a.issuperset(b):\n return False\n if not any(a.difference(b)):\n return False\n\n return True", "def check_divisible(n: int, divisors: Iterable) -> bool:\n for i in divisors:\n if n % i != 0:\n return False\n return True", "def subsets(n):\n binary = lambda x: x>0 and binary(x>>1) + [x&1] or []\n pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'\n return [pad(binary(i)) for i in range(1, 2**n)]", "def test_only_two_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], True, False))\n\n self.assertFalse( f( 1, 0, [], True, False))\n self.assertFalse( f( 1, 0, [1], True, False))\n self.assertTrue( f( 1, 0, 
[2], True, False))\n self.assertFalse( f( 1, 0, [3], True, False))\n self.assertFalse( f( 1, 0, [4], True, False))\n\n self.assertTrue( f( 1, 1, [], True, False))\n self.assertFalse( f( 1, 1, [2], True, False))\n\n self.assertFalse( f( 2, 0, [2], True, False))\n self.assertFalse( f( 2, 0, [3], True, False))\n self.assertTrue( f( 2, 0, [4], True, False))\n self.assertFalse( f( 2, 0, [5], True, False))\n \n self.assertTrue( f( 2, 1, [2], True, False))\n self.assertFalse( f( 2, 1, [3], True, False))\n self.assertFalse( f( 2, 1, [4], True, False))\n\n self.assertTrue( f(13, 26, [], True, False))\n self.assertTrue( f(13, 0, [26], True, False))\n self.assertTrue( f(13, 14, [12], True, False))\n self.assertTrue( f(13, 13, [10], True, False))\n self.assertFalse( f(13, 15, [11], True, False))\n\n self.assertFalse( f( 6, 1, [2,4,6], True, False))\n self.assertTrue( f( 7, 1, [2,4,6], True, False))\n self.assertFalse( f( 8, 1, [2,4,6], True, False))", "def straight(ranks):\n return (max(ranks)-min(ranks) == 4) and len(set(ranks)) == 5", "def do_tests(n, s, d, t):\n for i in range(t):\n if is_composite(n, s, d):\n return False\n return True", "def canPartition(self, nums):\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[(i, k)] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s/2)", "def fullIn(C, g):\n for set in C:\n if not fullCmpSets(set, g):\n return 1", "def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res", "def is_multiple(n,m):\n return n % m == 0", "def straight(ranks):\n return max(ranks) - min(ranks) == 4 and len(set(ranks)) == 5", "def getDivisors(n):", "def test_find_sets(self):\n cards = numpy.array([[1,1,1,2,0],\n [0,1,2,2,2],\n [0,1,2,2,2],\n [0,1,2,2,2]])\n\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by 
dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1" ]
[ "0.69097584", "0.64683", "0.6412315", "0.6408911", "0.62525964", "0.62090695", "0.6167503", "0.6090646", "0.6082563", "0.60608363", "0.60449654", "0.60165566", "0.59783715", "0.5973267", "0.5971005", "0.5965851", "0.59292936", "0.5929147", "0.59095657", "0.5892798", "0.5868436", "0.5861274", "0.5855593", "0.5840417", "0.5838968", "0.58324033", "0.5828532", "0.5809697", "0.5801886", "0.5790221" ]
0.6884488
1
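Purely for illustration, the same subset check reads as follows in Python 3; it mirrors the stored document above rather than adding anything new.

s2 = {i for i in range(21) if i % 2 == 0}   # multiples of 2 up to 20
s3 = {i for i in range(21) if i % 3 == 0}   # multiples of 3 up to 20
s4 = {i for i in range(21) if i % 4 == 0}   # multiples of 4 up to 20

print(s3.issubset(s2))  # False: 3, 9, 15 are odd
print(s4.issubset(s2))  # True: every multiple of 4 is also a multiple of 2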
This strategy always tries to steer the hunter directly towards where the target last said it was and then moves forwards at full speed. This strategy also keeps track of all the target measurements, hunter positions, and hunter headings over time, but it doesn't do anything with that information.
def next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER = None):
    # This function will be called after each time the target moves.
    # The OTHER variable is a place for you to store any historical information about
    # the progress of the hunt (or maybe some localization information). Your return format
    # must be as follows in order to be graded properly.

    # helper function to map all angles onto [-pi, pi]
    def angle_truncate(a):
        while a < 0.0:
            a += pi * 2
        return ((a + pi) % (pi * 2)) - pi

    #print "true heading"
    #print test_target.heading
    I = matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) #identity matrix
    R = matrix([[measurement_noise, 0], [0, measurement_noise]])
    H = matrix([[0, 1, 0], [0, 0, 1]]) #Jacobian of the measurement function
    u = matrix([[0], [0], [0]])
    F = []
    heading = 0 #WILD ASS GUESS

    if OTHER is not None:
        print "-----------------"
        current_measurement = target_measurement
        last_measurement = OTHER['last_measurement']
        OTHER['measurements'].append(target_measurement) #I know this is stupid but I just want to save the data... Memory management be damned
        heading = atan2(target_measurement[1] - last_measurement[1], target_measurement[0] - last_measurement[0])
        print "calculated heading"
        print heading
        X = OTHER['X']
        P = OTHER['P']
        if 'last_heading' not in OTHER:
            OTHER['last_heading'] = heading
            xy_estimate = [X.value[1][0], X.value[2][0]]
            OTHER['last_measurement'] = target_measurement
        else:
            print "OTHER is:", OTHER
            turning_angle = heading - OTHER['last_heading']
            print "turning angle:", turning_angle
            print "turning angle actual:", target.turning
            #last_heading = OTHER['last_heading']
            #do some guessing
            D = distance_between(target_measurement, last_measurement)
            print "this is the D"
            print D
            theta = (heading+turning_angle)%(2*pi)
            print "theta:", theta
            print "theta - heading current:", theta - target.heading
            #estimation step
            #is it "last heading" or "theta"????
            # X = matrix([[theta],
            #             [X.value[1][0] + D * cos(theta)],
            #             [X.value[2][0] + D * sin(theta)]])
            delta_x = D * cos(theta)
            delta_y = D * sin(theta)
            nextX = target_measurement[0] + delta_x
            nextY = target_measurement[1] + delta_y
            # nextX = X.value[1][0] + delta_x
            # nextY = X.value[2][0] + delta_y
            #print "the distance to the next guessed point is:", distance_between([nextX,nextY], measurement)
            X = matrix([[theta], [nextX], [nextY]])
            print "I'm projecting X out to:", X
            print "Note, the current robot stats:", target.heading, target.x, target.y
            F = matrix([[1, 0, 0], [-D*sin(theta), 1, 0], [D*cos(theta), 0, 1]])
            P = OTHER['P']
            #X = OTHER['X']
            H = matrix([[0, 1, 0], [0, 0, 1]])
            # #Prediction
            # X = (F * X) + u
            # P = F * P * F.transpose() # + Q
            P = F * P * F.transpose() # + Q
            #measurement update
            observations = matrix([[target_measurement[0]], [target_measurement[1]]]) #truth
            Z = H*X
            Y = observations - Z
            print "this is Y"
            print Y
            S = H * P * H.transpose() + R
            K = P * H.transpose() * S.inverse()
            X = X + (K*Y)
            P = (I - (K * H)) * P
            X.value[0][0] = angle_truncate(X.value[0][0])
            OTHER['X'] = X
            OTHER['P'] = P
            x_estimate = OTHER['X'].value[1][0]
            y_estimate = OTHER['X'].value[2][0]
            print "Currently, the robot state is:", target.heading, observations
            print "This is what Kalman thinks X will be:", OTHER['X']
            xy_estimate = [x_estimate, y_estimate]
            OTHER['last_heading'] = heading
            OTHER['last_measurement'] = target_measurement
    else:
        #x = theta, x, y
        X = matrix([[0.5], [2], [4]])
        #convariance matrix
        P = matrix([[1000, 0, 0], [0, 1000, 0], [0, 0, 1000]])
        OTHER = {'last_measurement': target_measurement, 'X': X, 'P': P, 'measurements': [target_measurement]}
        xy_estimate = [X.value[1][0], X.value[2][0]]

    # if not OTHER: # first time calling this function, set up my OTHER variables.
    #     measurements = [target_measurement]
    #     hunter_positions = [hunter_position]
    #     hunter_headings = [hunter_heading]
    #     OTHER = (measurements, hunter_positions, hunter_headings) # now I can keep track of history
    # else: # not the first time, update my history
    #     OTHER[0].append(target_measurement)
    #     OTHER[1].append(hunter_position)
    #     OTHER[2].append(hunter_heading)
    #     measurements, hunter_positions, hunter_headings = OTHER # now I can always refer to these variables

    #plugging in the Hunter to target the next anticipated area for the target
    if distance_between(hunter_position, xy_estimate) > max_distance: #if I can't get to the position in time
        # I want to go to a known point and keep going there.
        heading_to_target = get_heading(hunter_position, OTHER['measurements'][0]) #grab the first measurement
        heading_difference = heading_to_target - hunter_heading
        turning = heading_difference
        distance = max_distance # full speed ahead!
        print "I'm moving to the point"
        if distance_between(hunter_position, OTHER['measurements'][0]) <= max_distance/2:
            distance = 0 #stay put
            heading_to_target = get_heading(hunter_position, OTHER['measurements'][1]) #point at the next one
            heading_difference = heading_to_target - hunter_heading
            turning = heading_difference
            print "I'm staying at the point in waiting"
    else:
        heading_to_target = get_heading(hunter_position, xy_estimate)
        heading_difference = heading_to_target - hunter_heading
        turning = heading_difference # turn towards the target
        distance_to_point = distance_between(hunter_position, xy_estimate)
        distance = distance_to_point #I don't want to travel full speed LOL
        print "ATTACK!"

    return turning, distance, OTHER
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warm_up(self):\n self.velocity = self.steering_behaviours.calculate()\n self.pos += self.velocity\n self.pos = Point(int(self.pos.x), int(self.pos.y))\n if not self.is_moving():\n if self.steering_behaviours.target == self.soccer_field.ball.pos:\n # let's go back towards where I was.\n self.steering_behaviours.target = self.initial_pos\n else:\n # let's go towards the ball.\n self.steering_behaviours.target = self.soccer_field.ball.pos\n self.direction = Vec2d(self.steering_behaviours.target - self.pos).normalized()", "def naive_next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER):\n if not OTHER: # first time calling this function, set up my OTHER variables.\n measurements = [target_measurement]\n hunter_positions = [hunter_position]\n hunter_headings = [hunter_heading]\n OTHER = (measurements, hunter_positions, hunter_headings) # now I can keep track of history\n else: # not the first time, update my history\n OTHER[0].append(target_measurement)\n OTHER[1].append(hunter_position)\n OTHER[2].append(hunter_heading)\n measurements, hunter_positions, hunter_headings = OTHER # now I can always refer to these variables\n\n heading_to_target = get_heading(hunter_position, target_measurement)\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference # turn towards the target\n distance = max_distance # full speed ahead!\n return turning, distance, OTHER", "def naive_next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER):\n if not OTHER: # first time calling this function, set up my OTHER variables.\n measurements = [target_measurement]\n hunter_positions = [hunter_position]\n hunter_headings = [hunter_heading]\n OTHER = (measurements, hunter_positions, hunter_headings) # now I can keep track of history\n else: # not the first time, update my history\n OTHER[0].append(target_measurement)\n OTHER[1].append(hunter_position)\n OTHER[2].append(hunter_heading)\n measurements, hunter_positions, hunter_headings = OTHER # now I can always refer to these variables\n\n heading_to_target = get_heading(hunter_position, target_measurement)\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference # turn towards the target\n distance = max_distance # full speed ahead!\n return turning, distance, OTHER", "def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER=None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! 
It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance,\n OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return ctr-1", "def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.97 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print \"You got it right! It took you \", ctr, \" steps to catch the target.\"\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n\n ctr += 1\n if ctr >= 1000:\n print \"It took too many steps to catch the target.\"\n return caught", "def faceTowards(self, target):\n current_tile = self.current_tile()\n if(target and current_tile):\n x_dist = target.coordinates()[0] - current_tile.coordinates()[0]\n if x_dist == 0: return\n self.direction_val = x_dist/abs(x_dist)\n #TEMP\n if self.direction_val == -1:\n self.direction_id = 'left'\n if self.direction_val == 1:\n self.direction_id = 'right'", "def chase(self, target):\n linear_dist = lambda x1, x2, y1, y2: math.sqrt((x1 - x2)**2 + \n (y1 - y2)**2)\n min_dist_to_target = linear_dist(self.x, target.x, \n self.y, target.y)\n possible_posn = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n move_to_make = None\n\n for posn in possible_posn:\n if (self.x + posn[0] == self.handler.player.x and \n self.y + posn[1] == self.handler.player.y and \n self.handler.game_state != data.DEAD):\n dmg = self.deal_damage(self.handler.player)\n\n if dmg:\n self.handler.message_box.add_msg(\"{} attacks you for {} damage!\".format(self.name, dmg), \n data.COLOURS['mob_atk_text'])\n else:\n self.handler.message_box.add_msg(\"{} missed!\".format(self.name), \n data.COLOURS['mob_atk_text'])\n\n if self.handler.game_state == data.DEAD:\n self.handler.message_box.add_msg(\"{} killed you!\".format(self.name),\n data.COLOURS['player_die_text'])\n elif not self.handler.world.is_solid(self.x + posn[0], self.y + posn[1]):\n new_dist = linear_dist(self.x + posn[0], target.x,\n self.y + posn[1], target.y)\n if new_dist < 
min_dist_to_target:\n min_dist_to_target = new_dist\n move_to_make = posn\n\n if move_to_make:\n self.move(move_to_make[0], move_to_make[1])", "def autoMove(self) :\n\n\t\tdx = Places.getLoc(self.targetPlace)[0] - self.avatarNP.getX()\n\t\tdy = Places.getLoc(self.targetPlace)[1] - self.avatarNP.getY()\n\t\tdist = math.sqrt(dx*dx + dy*dy)\n\t\th0 = self.avatarNP.getH()\n\t\tif dist < 4 :\n\t\t\t# pick new target and determine deltaH\n\t\t\tnbors = Places.getNeighbors(self.targetPlace)\n\t\t\tx = random.randint(0,len(nbors)-1)\n\t\t\tif nbors[x] == self.oldPlace :\n\t\t\t\tx = (1 if x == 0 else x-1)\n\t\t\tt = nbors[x]\n\t\t\th = self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1])\n\t\t\tself.deltaH = h - h0\n\t\t\tif self.deltaH > 180 : self.deltaH -= 360\n\t\t\telif self.deltaH < -180 : self.deltaH += 360\n\t\t\tself.deltaH /= 2\n\t\t\tself.oldPlace = self.targetPlace\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = True\n\n\t\t# adjust heading and position\n\t\tt = self.targetPlace\n\t\th = self.heading(self.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\t Places.getLoc(t)[0], Places.getLoc(t)[1])\n\t\tdh1 = h - h0\n\t\tif dh1 > 180 : dh1 -= 360\n\t\telif dh1 < -180 : dh1 += 360\n\t\tif self.turning :\n\t\t\tdh2 = self.deltaH * globalClock.getDt()\n\t\t\tif math.fabs(dh1) <= math.fabs(dh2) : \n\t\t\t\tself.turning = False\n\t\t\telse :\n\t\t\t\th = h0 + dh2\n\t\tself.avatarNP.setH(h)\n\t\tself.avatarNP.setFluidY(self.avatarNP,-2 * globalClock.getDt())\n\t\t\n\t\treturn\n\n\t\t\"\"\"\n\t\tif self.rotateDir == -1:\n\t\t\tself.rotateDir = random.randint(1,25) #chances to rotate\n\t\tif self.rotateDuration == -1:\n\t\t\tself.rotateDuration = random.randint(200,400)\n\n\t\t# guide the moving direction of the bot\n\t\tif self.rotateDir <= 3 : # turn left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 40 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir <= 6 : # turn right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 50 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 7 : # turn big left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 8 : # turn big right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telse :\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\t\tself.avatarNP.setFluidPos(self.avatarNP, 0,\n\t\t\t\t\t-1 * globalClock.getDt(),\n\t\t\t\t\tself.avatarNP.getZ() )\n\t\t# moving forward\n\t\t#self.avatarNP.setFluidPos(self.avatarNP, 0,\n\t#\t\t\t\t-1 * globalClock.getDt(),\n\t#\t\t\t\tself.avatarNP.getZ() )\n\t\treturn\n\t\t\"\"\"", "def move_to_stage_1(self, target, any_hostiles):\n # type: (RoomPosition, bool) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving - stage 1.\", _.pluck(ordered_members, 'name'))\n\n options = 
self.new_movement_opts()\n\n home = ordered_members[0].home\n origin = self.find_origin()\n\n serialized_obj = home.hive.honey.get_serialized_path_obj(origin, target, options)\n ordered_rooms_in_path = honey.get_room_list_from_serialized_obj(serialized_obj)\n\n room_path_lengths = []\n for room_name in ordered_rooms_in_path:\n room_path_lengths.push(len(serialized_obj[room_name]) - 1)\n\n members_path_positions = []\n any_member_off_path = False\n\n furthest_back_hurt_index = None\n\n for index in range(0, len(ordered_members)):\n drone = ordered_members[index]\n\n if drone.creep.hits < drone.creep.hitsMax:\n furthest_back_hurt_index = index\n\n room_index = ordered_rooms_in_path.indexOf(drone.pos.roomName)\n if not room_index:\n # if drone != ordered_members[0]:\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n room_path = serialized_obj[drone.pos.roomName]\n\n path_index, moving_direction, reverse_dir = drone.creep.findIndexAndDirectionInPath(room_path)\n\n if path_index < 0:\n self.log(\"..: position ({},{}) is not within {} ({}, {}, {})\",\n drone.pos.x, drone.pos.y, room_path, path_index, moving_direction, reverse_dir)\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n\n members_path_positions.push({\n 'room': room_index,\n 'path': path_index,\n 'dir': moving_direction,\n 'rev': reverse_dir,\n })\n\n if any_member_off_path:\n for i in range(len(ordered_members) - 1, -1, -1):\n member = ordered_members[i]\n\n moving_now = False\n if members_path_positions[i] is None:\n # Since the member is definitely off the path\n self.log(\"Member {} ({}) off path - individually following military path ({} -> {})..\",\n member.name, member.pos, origin, target)\n\n else:\n if member.pos.x <= 2 or member.pos.x >= 48 or member.pos.y <= 2 or member.pos.y >= 48 \\\n or _.some(member.room.look_for_in_area_around(LOOK_STRUCTURES, member.pos, 1),\n lambda s: s.destination):\n moving_now = True\n else:\n # members near members that are off path should also move, to make room available.\n for i2 in range(0, len(ordered_members)):\n other_member = ordered_members[i2]\n if members_path_positions[i2] is None \\\n and movement.chebyshev_distance_room_pos(other_member.pos, member.pos) \\\n <= len(ordered_members) + 1:\n moving_now = True\n break\n\n if moving_now:\n direction = members_path_positions[i].dir\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if direction != -30:\n result = member.creep.move(direction)\n member.creep.__direction_moved = direction\n if result != OK and result != ERR_TIRED:\n member.log(\"Error moving by squad path ({}.move({})): {}\",\n member.creep, direction, result)\n member.follow_military_path(origin, target, options)\n else:\n more_to_move_without_near_edge = Infinity\n # iterate backwards over every member so we can break the loop easily if any further back members are\n # too far behind.\n # ordered_members[0] is the head of the group\n any_fatigued = False\n for i in range(len(ordered_members) - 1, -1, -1):\n drone = ordered_members[i]\n\n if drone.creep.fatigue:\n any_fatigued = True\n\n # will sometimes be undefined, but that's ok since it's only used if furthest_back_hurt_index > 1\n prev_drone = ordered_members[i + 1]\n move_obj = members_path_positions[i]\n\n if drone.memory.off_path_for:\n del drone.memory.next_ppos\n del drone.memory.off_path_for\n del drone.memory.lost_path_at\n\n if more_to_move_without_near_edge <= 0 and not 
movement.is_edge_position(drone.pos):\n continue\n else:\n more_to_move_without_near_edge -= 1\n\n # self.log(\"[{}] regular stage1 movement in dir {}\", drone.name, move_obj.dir)\n\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if not move_obj and i == 0:\n drone.follow_military_path(origin, target, options)\n else:\n if furthest_back_hurt_index > i:\n drone.log(\"moving backwards to help out.\")\n if not drone.pos.isNearTo(prev_drone.pos) and any_fatigued:\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n\n if move_obj.dir != -30:\n result = drone.creep.move(move_obj.dir)\n drone.creep.__direction_moved = move_obj.dir\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\", drone.creep, move_obj.dir, result)\n\n if i != 0:\n next_member_obj = members_path_positions[i - 1]\n\n room_diff = next_member_obj['room'] - move_obj['room']\n if room_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({})\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 0:\n abs_path_diff = next_member_obj['path'] - move_obj['path']\n\n if abs_path_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({}).\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 1:\n # use the room path length to see how far we are to the edge of the room, to get an accurate\n # diff\n abs_path_diff = (next_member_obj['path'] - 4) \\\n + (room_path_lengths[move_obj['room']] - move_obj['path'])\n\n if abs_path_diff < 0:\n # room_path_lengths is an estimation, and may be off.\n abs_path_diff = next_member_obj['path']\n else:\n # just a message that we're quite far behind.\n abs_path_diff = 100\n\n self.log(\"[{}] room diff: {}, path diff: {}, pos: {}\",\n drone.name, room_diff, abs_path_diff, drone.pos)\n if abs_path_diff > 10 or (any_hostiles and abs_path_diff > 1):\n more_to_move_without_near_edge = 0\n continue\n elif abs_path_diff <= 1:\n more_to_move_without_near_edge += 1\n # TODO: move backwards to re-unite when there are hostiles.", "def _targeting_mode(self):\n if self._stack:\n pos = self._stack.pop(0)\n hit = grid.shoot(pos)\n shot = hit.cell\n # if we hit a ship\n if shot in SHIPS:\n self._target_ships.add(shot)\n self._stack += self._get_neighbours(pos)\n # if we sunk a ship\n if hit.result == SUNK_SHIP:\n self._target_ships.remove(shot)\n log(\"[TARGET]: Sunk \" + SHIP_NAME[shot] + \" at \" + str(pos))\n if not self._target_ships:\n self._stack = []\n self._mode = HUNTING\n log(\"[TARGET]: All targets destroyed, return to hunt.\")\n # if we just hit a ship\n else:\n log(\"[TARGET]: Hit a ship at \" + str(pos))\n elif shot == WATER:\n log(\"[TARGET]: Missed at \" + str(pos))\n # if we already hit the position\n if shot in HITS:\n shot = self.fire()\n else:\n 
self.shots.add(pos)\n return shot\n # if stack is empty, go back to hunting mode\n else:\n self._mode = HUNTING\n return self.fire()", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def run_ai(self):\n state = self.brainstate\n me = self.owner\n if not self.owner.alive:\n return\n self._acquire_target()\n if 'target' in state:\n self._move_towards_target()\n else:\n # No target, wander around\n if random.random() > 0.3:\n possible_directions = [i for i in\n ([-1, -1], [-1, 0], [-1, 1], [0, -1],\n [0, 1], [1, -1], [1, 0], [1, 1])\n if afr.map.map.tile_is_traversable(\n me.x + i[0],\n me.y + i[1]\n )\n ]\n logging.debug(\"possible directions: %s\", possible_directions)\n movement = random.choice(possible_directions)\n me.x += movement[0]\n me.y += movement[1]", "def move_to(self, target):\n # type: (RoomPosition) -> None\n hive = self.home.hive\n home = self.find_home()\n origin = self.find_origin()\n\n total_distance = hive.honey.find_path_length(origin, target, self.new_movement_opts())\n\n min_distance_from_home = Infinity\n min_distance_to_origin = Infinity\n min_distance_to_target = movement.chebyshev_distance_room_pos(self.members_movement_order()[0].pos, target)\n max_distance_to_target = -Infinity\n any_hostiles = False\n for member in self.members:\n distance_to_home = movement.chebyshev_distance_room_pos(member.pos, home)\n distance_to_origin = movement.chebyshev_distance_room_pos(member.pos, origin)\n distance_to_target = movement.chebyshev_distance_room_pos(member.pos, target)\n if distance_to_home < min_distance_from_home:\n min_distance_from_home = distance_to_home\n if distance_to_target > max_distance_to_target:\n max_distance_to_target = distance_to_target\n if distance_to_origin < min_distance_to_origin:\n min_distance_to_origin = distance_to_origin\n if len(member.room.find(FIND_HOSTILE_CREEPS)):\n any_hostiles = True\n\n if min_distance_to_origin > 100:\n mv_order = self.members_movement_order()\n self.set_origin(mv_order[len(mv_order) - 1].pos)\n if min_distance_from_home < 50 and (max_distance_to_target < total_distance / 2):\n self.log(\"move_to: chose stage 0 (minimum distance from home: {}, maximum distance from home: {},\"\n \" total distance: {})\"\n .format(min_distance_from_home, max_distance_to_target, total_distance))\n self.move_to_stage_0(target)\n elif min_distance_to_target < 300 and any_hostiles:\n self.move_to_stage_2(target)\n elif min_distance_to_target > 60 or max_distance_to_target > 200:\n # self.log(\"move_to: chose stage 1 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_1(target, any_hostiles)\n else:\n # self.log(\"move_to: chose stage 2 (minimum distance from home: {}, total distance: {}, \"\n # \"minimum distance to target: {}, maximum distance to target: {})\"\n # .format(min_distance_from_home, total_distance,\n # min_distance_to_target, max_distance_to_target))\n self.move_to_stage_2(target)", "def demo_grading_visual(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.97 * target_bot.distance # 1.94 is an example. 
It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change Size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print \"You got it right! It took you \", ctr, \" steps to catch the target.\"\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n #End of visualization\n ctr += 1\n if ctr >= 1000:\n print \"It took too many steps to catch the target.\"\n return caught", "def head_to(self, target: Tuple[float, float], speed: float = 1.5):\n pos = np.array(self.pos)\n target = np.array(target)\n\n heading = np.array(self.model.space.get_heading(pos, target))\n vector = speed * heading / np.linalg.norm(heading)\n self.model.space.move_agent(self, pos + vector)\n return", "def follow_target(self,\n target,\n home=False,\n offset=[0., 0., 0.],\n yaw_offset=0.,\n desired_follower_alt=None,\n follow_duration=rospy.Duration(60, 0),\n duration=rospy.Duration(600, 0)):\n # Start and set to guide mode\n start = rospy.Time.now()\n self.guided_mode(duration=duration)\n # Collect pose and heading of the target\n self.target_heading 
= [0.0 for _ in range(len(self.target_heading))]\n self.target_global_pose = [\n NavSatFix() for _ in range(len(self.target_global_pose))\n ]\n pose_sub = rospy.Subscriber('/%s/mavros/global_position/raw/unfix' %\n target,\n NavSatFix,\n self._target_global_pose_cb,\n queue_size=1)\n head_sub = rospy.Subscriber('/%s/mavros/global_position/compass_hdg' %\n target,\n Float64,\n self._target_heading_cb,\n queue_size=1)\n # Start following the target\n followed_duration = rospy.Duration(0, 0)\n duration = duration - (rospy.Time.now() - start)\n start = rospy.Time.now()\n while (rospy.Time.now() - start < duration) and not (\n rospy.is_shutdown()) and (not self.external_intervened) and (\n followed_duration < follow_duration):\n if self.low_battery and not home:\n rospy.logwarn('%s battery is below minimum voltage!!!' %\n self.namespace)\n break\n heading = self.target_heading[-1]\n if self.target_global_pose[-1] == NavSatFix():\n self._rate.sleep()\n continue\n elif self.target_global_pose[0] == NavSatFix():\n latitude = self.target_global_pose[-1].latitude\n longitude = self.target_global_pose[-1].longitude\n altitude = self.target_global_pose[-1].altitude\n else:\n latitude, longitude, altitude = self.predict_target_pose(5)\n # convert offset from meters to lat and long in ENU system\n offset_x = (offset[0] * np.cos(heading) +\n offset[1] * np.sin(heading))\n offset_y = (-1 * offset[0] * np.sin(heading) +\n offset[1] * np.cos(heading))\n latitude_offset, longitude_offset = xy_to_longlat(\n offset_x, offset_y, latitude)\n # Setup target position\n target = GlobalPositionTarget()\n target.header.seq = 1\n target.header.stamp = rospy.Time.now()\n target.header.frame_id = 'map'\n target.type_mask = 0b001111111000\n # Due to yaw_ned_to_enu conversion, the sin and cos are flipped\n target.latitude = latitude + latitude_offset\n target.longitude = longitude + longitude_offset\n target.coordinate_frame = GlobalPositionTarget.FRAME_GLOBAL_REL_ALT\n if (self._min_range >\n -1) and (self._rangefinder[-1] - self._min_range <\n self.MINIMUM_ALTITUDE):\n rospy.logerr(\"%s is %.3f meters away from impact!\" %\n (self.namespace,\n (self._rangefinder[-1] - self._min_range)))\n target_alt = self.global_pose.altitude + 0.2\n elif desired_follower_alt is not None:\n target_alt = desired_follower_alt\n else:\n target.coordinate_frame = GlobalPositionTarget.FRAME_GLOBAL_INT\n target_alt = altitude + offset[2]\n target.altitude = target_alt\n target.yaw = yaw_ned_to_enu(heading + (yaw_offset / 180.) 
* np.pi)\n target.yaw_rate = 0.2\n # Publish aimed position\n # rospy.loginfo(target)\n self._setpoint_pub.publish(target)\n # Check uav position with target\n latitude_offset, longitude_offset = xy_to_longlat(\n offset_x, offset_y, self.target_global_pose[-1].latitude)\n target_pose = np.array([\n self.target_global_pose[-1].latitude + latitude_offset,\n self.target_global_pose[-1].longitude + longitude_offset\n ])\n uav_pose = np.array([\n self.global_pose.latitude,\n self.global_pose.longitude,\n ])\n if abs(target_alt - self.global_pose.altitude\n ) < 0.7 and np.linalg.norm(uav_pose - target_pose) < 6e-6:\n rospy.loginfo(\"%s has found target, following %d seconds\" %\n (self.namespace, followed_duration.secs))\n followed_duration += self._rate.sleep_dur\n else:\n followed_duration = rospy.Duration(0, 0)\n rospy.loginfo(\"Target is out of range, resetting the duration\")\n self._rate.sleep()\n # Unregister subscriptions\n pose_sub.unregister()\n head_sub.unregister()\n # Prepare response\n response = int(followed_duration >= follow_duration)\n if (rospy.Time.now() - start) > duration:\n response = self.OUT_OF_DURATION\n elif self.external_intervened:\n response = self.EXTERNAL_INTERVENTION\n return response", "def steer(self, direction):\n\n if -1 <= direction <= 1:\n target_position = self.steering_limit * direction\n self.brick_pi.set_motor_position(\n self.motor_steer, -target_position)", "def perform(self):\n\t\tif self.turns_remaining <= 0:\n\t\t\tself.engine.message_log.add_message(\n\t\t\t\tf\"The {self.entity.name} is no longer confused.\",\n\t\t\t)\n\t\t\tself.entity.ai = self.previous_ai\n\t\telse:\n\t\t\t# Pick a random direction\n\t\t\tdir_x, dir_y = random.choice(\n\t\t\t\t[\n (-1, -1), # Northwest\n (0, -1), # North\n (1, -1), # Northeast\n (-1, 0), # West\n (1, 0), # East\n (-1, 1), # Southwest\n (0, 1), # South\n (1, 1), # Southeast\n ]\n\t\t\t)\n\n\t\t\tself.turns_remaining -= 1\n\t\t\t# The actor will either try to move or attack in the chosen random direction.\n\t\t\t# It's possible the actor will just bump into the wall, wasting a turn.\n\t\t\treturn BumpAction(self.entity, dir_x, dir_y,).perform()", "def demo_grading_graph(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. 
It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n\n prediction = turtle.Turtle()\n prediction.shape('arrow')\n prediction.color('pink')\n prediction.resizemode('user')\n prediction.shapesize(0.2, 0.2, 0.2)\n prediction.penup()\n\n meeting = turtle.Turtle()\n meeting.shape('circle')\n meeting.color('red')\n meeting.resizemode('user')\n meeting.shapesize(0.3, 0.3, 0.3)\n meeting.penup()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! 
It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n position_guess = OTHER['meeting_position']\n next_target_guess = OTHER['target_position']\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n\n prediction.setheading(target_bot.heading*180/pi)\n prediction.goto(next_target_guess[0]*size_multiplier, next_target_guess[1]*size_multiplier-100)\n prediction.stamp()\n\n meeting.clear()\n meeting.setheading(target_bot.heading*180/pi)\n meeting.goto(position_guess[0]*size_multiplier, position_guess[1]*size_multiplier-100)\n meeting.stamp()\n #End of visualization\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return caught", "def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move", "def _run_trial(self, victim, kettle):\n poison_delta = kettle.initialize_poison()\n if self.args.full_data:\n dataloader = kettle.trainloader\n else:\n dataloader = kettle.poisonloader\n\n validated_batch_size = max(min(kettle.args.pbatch, len(kettle.poisonset)), 1)\n self.temp_sourceset = self._get_temp_sources(kettle)\n self.patch_temp_sources(kettle)\n '''\n num_workers = kettle.get_num_workers()\n sourceloader = torch.utils.data.DataLoader(kettle.sourceset, batch_size=validated_batch_size,\n shuffle=True, drop_last=False, num_workers=num_workers,\n pin_memory=PIN_MEMORY)\n tloader_iter = iter(sourceloader)\n '''\n\n if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:\n # poison_delta.requires_grad_()\n if self.args.attackoptim in ['Adam', 'signAdam']:\n att_optimizer = torch.optim.Adam([poison_delta], lr=self.tau0, weight_decay=0)\n else:\n att_optimizer = torch.optim.SGD([poison_delta], lr=self.tau0, momentum=0.9, weight_decay=0)\n if self.args.scheduling:\n scheduler = torch.optim.lr_scheduler.MultiStepLR(att_optimizer, milestones=[self.args.attackiter // 2.667, self.args.attackiter // 1.6,\n self.args.attackiter // 1.142], gamma=0.1)\n poison_delta.grad = torch.zeros_like(poison_delta)\n dm, ds = kettle.dm.to(device=torch.device('cpu')), kettle.ds.to(device=torch.device('cpu'))\n poison_bounds = torch.zeros_like(poison_delta)\n else:\n poison_bounds = None\n\n for step in range(self.args.attackiter):\n source_losses = 0\n poison_correct = 0\n for batch, example in enumerate(dataloader):\n sources, source_labels = [], []\n indcs = random.sample(list(range(len(self.temp_sourceset))), validated_batch_size)\n for i 
in indcs:\n temp_source, temp_label, _ = self.temp_sourceset[i]\n sources.append(temp_source)\n # source_labels.append(temp_label)\n sources = torch.stack(sources)\n loss, prediction = self._batched_step(poison_delta, poison_bounds, example, victim, kettle, sources)\n source_losses += loss\n poison_correct += prediction\n\n if self.args.dryrun:\n break\n\n # Note that these steps are handled batch-wise for PGD in _batched_step\n # For the momentum optimizers, we only accumulate gradients for all poisons\n # and then use optimizer.step() for the update. This is math. equivalent\n # and makes it easier to let pytorch track momentum.\n if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:\n if self.args.attackoptim in ['momPGD', 'signAdam']:\n poison_delta.grad.sign_()\n att_optimizer.step()\n if self.args.scheduling:\n scheduler.step()\n att_optimizer.zero_grad()\n with torch.no_grad():\n # Projection Step\n poison_delta.data = torch.max(torch.min(poison_delta, self.args.eps /\n ds / 255), -self.args.eps / ds / 255)\n poison_delta.data = torch.max(torch.min(poison_delta, (1 - dm) / ds -\n poison_bounds), -dm / ds - poison_bounds)\n\n source_losses = source_losses / (batch + 1)\n poison_acc = poison_correct / len(dataloader.dataset)\n if step % (self.args.attackiter // 5) == 0 or step == (self.args.attackiter - 1):\n print(f'Iteration {step}: Source loss is {source_losses:2.4f}, '\n f'Poison clean acc is {poison_acc * 100:2.2f}%')\n\n if self.args.step:\n if self.args.clean_grad:\n victim.step(kettle, None, self.sources, self.true_classes)\n else:\n victim.step(kettle, poison_delta, self.sources, self.true_classes)\n\n if self.args.dryrun:\n break\n\n return poison_delta, source_losses", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def turn_towards(heading):\r\n\tprint (\"In turn towards:\")\r\n\thead = vehicle.heading\r\n\tprint (\"Vehicle Heading: \",head)\r\n\tprint (\"Target Heading: \",heading)\r\n\trc1 = 1900\r\n\twhile True:\r\n\t\tif(head > heading - 
turnTowardsThreshold and head < heading + turnTowardsThreshold):\r\n\t\t\tbreak\r\n\t\tsendThrottleCommand(minimumThrottle, enableThrottle)\r\n\t\t#time.sleep(0.5)\r\n\t\tprint (\"Vehicle Heading: \",head)\r\n\t print (\"Target Heading: \",heading)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\ttime.sleep(0.2)\r\n\t\thead = vehicle.heading", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def move(self, model):\n\n for speed in self.speeds:\n\n # Direct\n\n new_location = self.lerp(self.loc_desire, self.location, speed)\n\n if not self.collision(model, new_location):\n\n break\n\n elif speed == self.speeds[-1]:\n\n # Wiggle\n\n new_location = self.location + np.random.randint(-1, 1+1, 2)\n\n # Rebound\n\n within_bounds = all(model.boundaries[0] <= new_location) and all(new_location <= model.boundaries[1])\n\n if not within_bounds:\n\n new_location = np.clip(new_location, model.boundaries[0], model.boundaries[1])\n\n # Move\n\n self.location = new_location\n\n return", "def head_direction(self, target, useAvoidance=False, verbose=False, turnSpeed=1, door = False):\n\n endVector = target\n\n speedLeft, speedRight = self.target_to_left_right_speeds(endVector)\n\n self.logger.debug(\"endVector: %s\" % repr(endVector))\n self.logger.debug(\"Wheel speeds: %d, %d\" % (speedLeft, speedRight))", "def process(self, entity):\n \n position = self.engine.positions.find(entity)\n # process as done if not in the current map\n if position.map_id != self.engine.world.id:\n return True\n info = self.engine.infos.find(entity)\n ai = self.engine.ais.find(entity)\n # simple ai logic (move, attack if enemy exists, run away)\n # behavior is updated during attack or update()\n movement = None\n while not movement:\n if ai.behavior == 'wander':\n # self.engine.logger.add(f\"{info.name}({entity.id}) wanders around\")\n movement = Movement.random_move()\n elif ai.behavior == 'attack':\n if ai.path:\n path = ai.path.pop(0)\n # self.engine.logger.add(f\"{ai.path}, {path}\")\n movement = Movement(path[0] - position.x, path[1] - position.y)\n # self.engine.logger.add(f\"{info.name}({entity.id}) saw player and is moving to attack on last path\")\n else:\n target_position = self.engine.positions.find(self.engine.player)\n # s = time.time()\n ai.path = pathfind(self.engine, position, target_position)\n # print(time.time() - s)\n if not ai.path:\n ai.behavior = 'wander'\n # movement = Movement.random_move()\n # self.engine.logger.add(f\"{info.name}({entity.id}) was attacking player but lost sight of him\")\n # else:\n # path = ai.path.pop(0)\n # movement = Movement(path[0] - position.x, path[1] - position.y)\n # self.engine.logger.add(f\"{info.name}({entity.id}) saw player and is moving to attack on recalc path\")\n elif ai.behavior == 'wait':\n movement = Movement(0, 0)\n return direction_to_keypress(movement.x, movement.y)", "def stepGenerator(self, current, target):\n\n while True:\n target = self.cfg[\"GOAL\"]\n if self.gotscript:\n if self.pathsteps in self.tc:\n terrain, topleft, botright = self.tc.get(self.pathsteps)\n pointlist = p4.getBlock(topleft, botright)\n # change logical map\n self.lmap.setPoints(terrain, pointlist)\n # change in gui, if running\n try:\n self.gui.clearPoints(pointlist)\n except:\n pass\n if self.pathsteps in self.gc:\n target = 
self.lmap.nearestPassable(self.gc.get(self.pathsteps))\n self.setGoal(target)\n if self.pathsteps in self.ac:\n newpos = p4.addVectors(current, self.ac.get(self.pathsteps))\n current = self.lmap.nearestPassable(newpos)\n yield newpos # scripted move is not costed or counted\n try:\n clockstart = timer() # start timer\n nextreturn = self.agent.getNext(self.lmap, current, target, self.timeremaining)\n logging.debug(nextreturn)\n clockend = timer()\n except:\n raise p4.BadAgentException()\n\n # Only time first step unless operating in 'realtime' mode. If this is realtime, and the step involved no reasoning (took less than FREE_TIME) do not count its time\n if ((not self.cfg.get(\"REALTIME\") and self.pathtime) or (\n (clockend - clockstart) < self.cfg.get(\"FREE_TIME\"))):\n steptime = 0\n else:\n steptime = (clockend - clockstart)\n previous = current\n\n # Agent may have returned single step or step plus sets of coords and colors.\n # Try/except distinguishes between them\n try:\n x = nextreturn[1][0] # fails if nextreturn is coord only\n current, configsets = nextreturn\n except TypeError:\n current = nextreturn\n finally:\n self.pathsteps += 1\n self.pathtime += steptime\n self.timeremaining -= steptime\n\n # We now consider every door open. In fact, we are just computing the final path cost, we are not\n # searching for it. So is reasonable to assume that I have all the keys along the path.\n allkeys = [k for k in self.lmap.key_and_doors.keys()]\n cost = self.lmap.getCost(current, previous, allkeys)\n # self.pathcost += self.lmap.getCost(current, previous, allkeys)\n if not self.lmap.isAdjacent(current, previous):\n cost = float('inf')\n # agent has made illegal move:\n if cost == float('inf'):\n self.updateStatus(\"Illegal move at \" + str(current) + \":\" + str(self.lmap.getCost(current)), False)\n if self.cfg[\"STRICT\"]:\n current = previous\n nextreturn = previous\n self.pathsteps -= 1\n cost = 0\n self.pathcost += cost\n yield nextreturn", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 
block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move" ]
[ "0.7139936", "0.7001866", "0.697465", "0.65336674", "0.65109694", "0.6359429", "0.60754806", "0.60304415", "0.59477687", "0.5936428", "0.5919734", "0.5891667", "0.5789955", "0.5778767", "0.57660544", "0.5735114", "0.5714579", "0.5646112", "0.5636291", "0.5635955", "0.56281716", "0.5607269", "0.5594633", "0.5581148", "0.5530476", "0.55292434", "0.55283946", "0.5520743", "0.5519804", "0.55147314" ]
0.7422416
0
Returns True if your next_move_fcn successfully guides the hunter_bot to the target_bot. This function is here to help you understand how we will grade your submission.
def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER = None): max_distance = 0.97 * target_bot.distance # 0.98 is an example. It will change. separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target caught = False ctr = 0 # We will use your next_move_fcn until we catch the target or time expires. while not caught and ctr < 1000: # Check to see if the hunter has caught the target. hunter_position = (hunter_bot.x, hunter_bot.y) target_position = (target_bot.x, target_bot.y) separation = distance_between(hunter_position, target_position) if separation < separation_tolerance: print "You got it right! It took you ", ctr, " steps to catch the target." caught = True # The target broadcasts its noisy measurement target_measurement = target_bot.sense() # This is where YOUR function will be called. turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER) # Don't try to move faster than allowed! if distance > max_distance: distance = max_distance # We move the hunter according to your instructions hunter_bot.move(turning, distance) # The target continues its (nearly) circular motion. target_bot.move_in_circle() ctr += 1 if ctr >= 1000: print "It took too many steps to catch the target." return caught
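A minimal driver sketch for the grading loop above, assuming the course starter `robot` class (constructor robot(x, y, heading, turning, distance) with set_noise, sense and move_in_circle) and the next_move strategy defined earlier in this file; the starting values are only an example:

# Example driver (illustrative values; demo_grading returns True once the
# hunter gets within the separation tolerance of the target).
target = robot(0.0, 10.0, 0.0, 2 * pi / 30, 1.5)
measurement_noise = 0.05 * target.distance      # next_move's R matrix reads this global
target.set_noise(0.0, 0.0, measurement_noise)
hunter = robot(-10.0, -10.0, 0.0)

caught = demo_grading(hunter, target, next_move)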
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER=None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance,\n OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return ctr-1", "def move(self, algMove):\n if self.d_engine.is_move_correct(algMove):\n print(\"correct\")", "def demo_grading_visual(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.97 * target_bot.distance # 1.94 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change Size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print \"You got it right! 
It took you \", ctr, \" steps to catch the target.\"\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n #End of visualization\n ctr += 1\n if ctr >= 1000:\n print \"It took too many steps to catch the target.\"\n return caught", "def next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER = None):\n # This function will be called after each time the target moves.\n\n # The OTHER variable is a place for you to store any historical information about\n # the progress of the hunt (or maybe some localization information). Your return format\n # must be as follows in order to be graded properly.\n\n # helper function to map all angles onto [-pi, pi]\n def angle_truncate(a):\n while a < 0.0:\n a += pi * 2\n return ((a + pi) % (pi * 2)) - pi\n\n #print \"true heading\"\n #print test_target.heading\n I = matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]) #identity matrix\n\n R = matrix([[measurement_noise, 0], [0, measurement_noise]])\n\n H = matrix([[0, 1, 0],\n [0, 0, 1]]) #Jacobian of the measurement function\n\n u = matrix([[0],\n [0],\n [0]])\n\n F = []\n\n heading = 0 #WILD ASS GUESS\n\n if OTHER is not None:\n print \"-----------------\"\n current_measurement = target_measurement\n last_measurement = OTHER['last_measurement']\n OTHER['measurements'].append(target_measurement)\n #I know this is stupid but I just want to save the data... 
Memory management be damned\n\n heading = atan2(target_measurement[1] - last_measurement[1], target_measurement[0] - last_measurement[0])\n print \"calculated heading\"\n print heading\n X = OTHER['X']\n P = OTHER['P']\n\n if 'last_heading' not in OTHER:\n OTHER['last_heading'] = heading\n xy_estimate = [X.value[1][0], X.value[2][0]]\n OTHER['last_measurement'] = target_measurement\n else:\n print \"OTHER is:\", OTHER\n turning_angle = heading - OTHER['last_heading']\n print \"turning angle:\", turning_angle\n print \"turning angle actual:\", target.turning\n #last_heading = OTHER['last_heading']\n\n\n #do some guessing\n D = distance_between(target_measurement, last_measurement)\n print \"this is the D\"\n print D\n theta = (heading+turning_angle)%(2*pi)\n print \"theta:\", theta\n print \"theta - heading current:\", theta - target.heading\n\n #estimation step\n\n #is it \"last heading\" or \"theta\"????\n # X = matrix([[theta],\n # [X.value[1][0] + D * cos(theta)],\n # [X.value[2][0] + D * sin(theta)]])\n\n delta_x = D * cos(theta)\n delta_y = D * sin(theta)\n\n nextX = target_measurement[0] + delta_x\n nextY = target_measurement[1] + delta_y\n\n # nextX = X.value[1][0] + delta_x\n # nextY = X.value[2][0] + delta_y\n\n #print \"the distance to the next guessed point is:\", distance_between([nextX,nextY], measurement)\n\n X = matrix([[theta],\n [nextX],\n [nextY]])\n\n print \"I'm projecting X out to:\", X\n print \"Note, the current robot stats:\", target.heading, target.x, target.y\n\n F = matrix([[1, 0, 0],\n [-D*sin(theta), 1, 0],\n [D*cos(theta), 0, 1]])\n\n P = OTHER['P']\n #X = OTHER['X']\n\n\n H = matrix([[0, 1, 0],\n [0, 0, 1]])\n\n # #Prediction\n # X = (F * X) + u\n # P = F * P * F.transpose() # + Q\n\n P = F * P * F.transpose() # + Q\n\n #measurement update\n observations = matrix([[target_measurement[0]],\n [target_measurement[1]]]) #truth\n Z = H*X\n Y = observations - Z\n print \"this is Y\"\n print Y\n S = H * P * H.transpose() + R\n K = P * H.transpose() * S.inverse()\n X = X + (K*Y)\n\n P = (I - (K * H)) * P\n\n X.value[0][0] = angle_truncate(X.value[0][0])\n\n\n OTHER['X'] = X\n\n OTHER['P'] = P\n x_estimate = OTHER['X'].value[1][0]\n y_estimate = OTHER['X'].value[2][0]\n print \"Currently, the robot state is:\", target.heading, observations\n print \"This is what Kalman thinks X will be:\", OTHER['X']\n xy_estimate = [x_estimate, y_estimate]\n\n OTHER['last_heading'] = heading\n OTHER['last_measurement'] = target_measurement\n\n\n else:\n #x = theta, x, y\n X = matrix([[0.5],\n [2],\n [4]])\n #convariance matrix\n P = matrix([[1000, 0, 0],\n [0, 1000, 0],\n [0, 0, 1000]])\n OTHER = {'last_measurement': target_measurement, 'X': X, 'P': P, 'measurements': [target_measurement]}\n xy_estimate = [X.value[1][0], X.value[2][0]]\n\n # if not OTHER: # first time calling this function, set up my OTHER variables.\n # measurements = [target_measurement]\n # hunter_positions = [hunter_position]\n # hunter_headings = [hunter_heading]\n # OTHER = (measurements, hunter_positions, hunter_headings) # now I can keep track of history\n # else: # not the first time, update my history\n # OTHER[0].append(target_measurement)\n # OTHER[1].append(hunter_position)\n # OTHER[2].append(hunter_heading)\n # measurements, hunter_positions, hunter_headings = OTHER # now I can always refer to these variables\n\n #plugging in the Hunter to target the next anticipated area for the target\n\n if distance_between(hunter_position, xy_estimate) > max_distance: #if I can't get to the position in time\n # I 
want to go to a known point and keep going there.\n heading_to_target = get_heading(hunter_position, OTHER['measurements'][0]) #grab the first measurement\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference\n distance = max_distance # full speed ahead!\n print \"I'm moving to the point\"\n if distance_between(hunter_position, OTHER['measurements'][0]) <= max_distance/2:\n distance = 0 #stay put\n heading_to_target = get_heading(hunter_position, OTHER['measurements'][1]) #point at the next one\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference\n print \"I'm staying at the point in waiting\"\n else:\n heading_to_target = get_heading(hunter_position, xy_estimate)\n heading_difference = heading_to_target - hunter_heading\n turning = heading_difference # turn towards the target\n distance_to_point = distance_between(hunter_position, xy_estimate)\n distance = distance_to_point #I don't want to travel full speed LOL\n print \"ATTACK!\"\n\n return turning, distance, OTHER", "def checkGoal(self):\n # -- It is not included for simplifity --#\n if self.reward_cumulative != None:\n x = round((abs(self.reward_cumulative) - abs(round(self.reward_cumulative))) * 100);\n rem_goal = x % 25\n rem_timeout = x % 20\n if rem_goal == 0 and x != 0:\n self.is_goal = True\n else:\n self.is_goal = False\n\n if rem_timeout == 0 and x != 0:\n self.is_timeout = True\n else:\n self.is_timeout = False", "def check_ball_on_target():\n\n pass", "def process_move(player,board):\r\n\r\n \r\n print(str(player) + \"'s turn\") #shows which player's turn it is\r\n col = player.next_move(board)\r\n board.add_checker(player.checker,col) #adds checker to specific column\r\n print()\r\n print(board)\r\n print()\r\n if board.is_win_for(player.checker) == True:\r\n print(player, \"wins in\", player.num_moves,'moves.\\nCongratulations!')\r\n return True\r\n elif board.is_win_for(player.checker)== False and board.is_win_for(player.opponent_checker()) == False and board.is_full() == True:\r\n print(\"It's a tie!\")\r\n return True\r\n else:\r\n return False", "def agent_won_trick(state: State) -> bool:\n lead_won = lead_won_trick(\n lead_card=state.lead_play, second_card=state.second_play\n )\n # Either the agent was the lead and it won, or it wasn't the lead,\n # and it won.\n return lead_won == state.agent_goes_first", "def is_legal_move(state, action, player, rewarding_move=False): # TODO: Update this function to an more\n # optimized one.\n action = action.get_action_as_dict()\n if rewarding_move:\n if player == state.get_next_player() == state.get_latest_player():\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND and state.in_hand[player * -1] > 0:\n return True\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n opponent_piece = state.get_board().get_player_pieces_on_board(Color(player * -1))\n if opponent_piece and action['action']['at'] in opponent_piece:\n return True\n return False\n else:\n if state.get_next_player() == player:\n if action['action_type'] == YoteActionType.ADD and state.in_hand[player] > 0:\n empty_cells = state.get_board().get_all_empty_cells()\n if empty_cells and action['action']['to'] in empty_cells:\n return True\n elif action['action_type'] == YoteActionType.MOVE:\n if state.get_board().get_cell_color(action['action']['at']) == Color(player):\n effective_moves = YoteRules.get_effective_cell_moves(state, action['action']['at'], player)\n if effective_moves and action['action']['to'] in effective_moves:\n 
return True\n return False\n return False", "def demo_grading_graph(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n\n prediction = turtle.Turtle()\n prediction.shape('arrow')\n prediction.color('pink')\n prediction.resizemode('user')\n prediction.shapesize(0.2, 0.2, 0.2)\n prediction.penup()\n\n meeting = turtle.Turtle()\n meeting.shape('circle')\n meeting.color('red')\n meeting.resizemode('user')\n meeting.shapesize(0.3, 0.3, 0.3)\n meeting.penup()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! 
It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n position_guess = OTHER['meeting_position']\n next_target_guess = OTHER['target_position']\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n\n prediction.setheading(target_bot.heading*180/pi)\n prediction.goto(next_target_guess[0]*size_multiplier, next_target_guess[1]*size_multiplier-100)\n prediction.stamp()\n\n meeting.clear()\n meeting.setheading(target_bot.heading*180/pi)\n meeting.goto(position_guess[0]*size_multiplier, position_guess[1]*size_multiplier-100)\n meeting.stamp()\n #End of visualization\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return caught", "def betting(game, episode, buttons):\n potential_wager = process_user_input(game, game.player1, game.player2, buttons)\n\n if potential_wager:\n game.player1.wager = potential_wager\n game.update_tablepot()\n\n if game.player1.folded:\n print(\"player1 folded\")\n return False\n\n game.player2.wager = process_bot_input(game, game.player2, game.player1, episode)\n game.update_tablepot()\n\n if game.player2.folded:\n print(\"player2 folded\")\n return False\n\n if game.player1.wager == game.player2.wager:\n print(\"moving on\")\n return True\n else:\n print(\"you're stuck in betting\")\n return betting\n else:\n return 'no input'", "def is_done_turning(self):\n\n check = self.small_bot.is_done_turning()\n print(\"DONE TURNING CHECK: \", check)\n return check", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def done(self):\n return self.goal == (0, 0)", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def check(self,event=None):\n if self.kenken.checkit(self.movelist, 
self.counter): #Calls the checkit method of the KenKen object\n #If user wins the game, display congratulatory message and instructions.\n self.lbl2[\"text\"] = \"Congratulations, you finished the puzzle!\\nSelect Next Puzzle to go to another puzzle\" + \\\n \"\\nor Exit to quit the game.\"\n elif event:\n #If user has not won the game yet, but checks with the \"Win?\" button in GUI - show message to continue playing.\n self.lbl2[\"text\"] = \"This puzzle is not done. Keep trying!\"", "def goal_reached(self):\r\n pos_0=self.goal[0]\r\n pos_1=self.goal[1]\r\n #self.start_score=self.string(self.start[0],self.start[1])\r\n #self.data_with_string[self.start_score]=self.start\r\n #self.goal_score=self.string(pos_0,pos_1)\r\n if self.h(self.current_score[0],self.current_score[1],self.current_score[2]) <=10 :\r\n self.goal_score=self.string(self.current_score[0],self.current_score[1],self.current_score[2])\r\n print(\"goal_reached\")\r\n #print(len(self.expanded))\r\n #print(\"self.expanded\",self.expanded)\r\n return True\r\n return False", "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def check_goal(self):\n hero = self.objects[0]\n others = self.objects[1:]\n\n for other in others:\n if other.x == hero.x and other.y == hero.y:\n self.objects.remove(other)\n if other.reward == 1:\n self.objects.append(GameObject(self.__new_position(), 1,\n 1, 1, 1, \"goal\"))\n elif other.reward == -1:\n self.objects.append(GameObject(self.__new_position(), 1,\n 1, 0, -1, \"fire\"))\n return other.reward, False\n return 0.0, False", "def utility(state:State,maximizing_player):\n best_move_score = -1\n #######################[Goal]#########################\n is_current_player_stuck = is_stuck(state,state.player_type)\n other_player = RIVAL if state.player_type == PLAYER else PLAYER\n # Check if stuck\n if is_current_player_stuck:\n if state.player_type == PLAYER:\n state.players_score[state.player_type] -= state.penalty_score\n else:\n state.players_score[state.player_type] += state.penalty_score\n return state.players_score[state.player_type] - state.players_score[other_player] \n ######################################################\n # Else\n #--------------------------------------------------\n ################# Available Steps #################\n #--------------------------------------------------\n player_available_steps = availables(state.board, state.locations[PLAYER])\n h1 = 4-player_available_steps\n h4 = player_available_steps\n #--------------------------------------------------\n ################# Fruits Distance #################\n #--------------------------------------------------\n h2 = -1\n if state.fruits_ttl > 0 and len(state.fruits_dict) > 0:\n min_fruit_dist = float('inf')\n for fruit_loc in state.fruits_dict:\n curr_fruit_dist = Manhattan(state.locations[state.player_type], fruit_loc)\n # Check what is the closest fruit reachable\n if curr_fruit_dist < min_fruit_dist and curr_fruit_dist <= state.fruits_ttl:\n other_player_fruit_dist = Manhattan(state.locations[other_player], fruit_loc)\n if curr_fruit_dist < other_player_fruit_dist:\n min_fruit_dist = curr_fruit_dist\n max_dist = 
len(state.board)+len(state.board[0])\n h2 = (max_dist*10.0/min_fruit_dist)+1 if min_fruit_dist < float('inf') else -1\n #--------------------------------------------------\n ################# Reachable Squrs #################\n #--------------------------------------------------\n reachables_player = reachables(state.board,state.locations[PLAYER])\n reachables_rival = reachables(state.board,state.locations[RIVAL])\n h3 = reachables_player - reachables_rival # We want more for us\n #--------------------------------------------------\n ################# Combine it all. #################\n #--------------------------------------------------\n if not state.half_game():\n w = 0.8 if h2 > 0 else 1\n best_move_score = w*(h1-h3) + (1-w)*h2 \n else:\n w = 0.7 if h2 > 0 else 1\n best_move_score = w*(h4+h3) + (1-w)*h2 \n\n best_move_score += state.players_score[state.player_type]\n return best_move_score", "def is_goal(state):\n pass", "async def movement_tick(self):\n self.movement_progress += self.sub.power.get_power(\"engines\")\n threshold = get_square(self.x, self.y).difficulty()\n if \"blessing\" in self.sub.upgrades.keywords:\n # Bound difficulty above by four (normal waters)\n threshold = min(4, threshold)\n if self.movement_progress >= threshold:\n self.movement_progress -= threshold\n direction = self.direction # Direction can change as result of movement.\n message = await self.move()\n move_status = (\n f\"Moved **{self.sub.name()}** in direction **{direction.upper()}**!\\n\"\n f\"**{self.sub.name()}** is now at position **{self.get_position()}**.\"\n )\n\n # Do all the puzzles stuff.\n await self.sub.puzzles.movement_tick()\n\n # Cancel trades, if necessary.\n trade_messages = self.sub.inventory.timeout_trade()\n\n # Finally, return our movement.\n if message:\n return f\"{message}\\n{move_status}\", trade_messages\n return move_status, trade_messages\n return None, {}", "def action(self, direction: str) -> bool:\n direction = direction[0].upper()\n assert (\n direction in constants.BABY_MOVEMENTS\n ), f\"Movement must be one of {constants.BABY_MOVEMENTS}\"\n if direction == \"R\":\n legal_moves = []\n if self.position[0] != 0:\n legal_moves.append(\"N\")\n if self.position[0] != self.board_dimensions[0] - 1:\n legal_moves.append(\"S\")\n if self.position[1] != 0:\n legal_moves.append(\"W\")\n if self.position[1] != self.board_dimensions[1] - 1:\n legal_moves.append(\"E\")\n direction = np.random.choice(legal_moves)\n if direction == \"N\":\n if self.position[0] != 0:\n self.position[0] -= 1\n return True\n else:\n return False\n elif direction == \"E\":\n if self.position[1] != self.board_dimensions[1] - 1:\n self.position[1] += 1\n return True\n else:\n return False\n elif direction == \"S\":\n if self.position[0] != self.board_dimensions[0] - 1:\n self.position[0] += 1\n return True\n else:\n return False\n elif direction == \"W\":\n if self.position[1] != 0:\n self.position[1] -= 1\n return True\n else:\n return False\n return False", "def succeeded(self):\n return self.current_reward == 300", "def test_lands_on_goal_correctly():\n env = Four_Rooms_Environment(stochastic_actions_probability=0.0)\n env.reset()\n env.move_user(env.current_user_location, (3, 3))\n env.move_goal(env.current_goal_location, (2, 2))\n\n env.step(0)\n assert env.reward == env.step_reward_for_not_achieving_goal\n assert not env.done\n\n env.step(3)\n assert env.reward == env.reward_for_achieving_goal\n assert env.done\n\n env = Four_Rooms_Environment(stochastic_actions_probability=0.0)\n env.reset()\n 
env.move_user(env.current_user_location, (2, 3))\n env.move_goal(env.current_goal_location, (2, 8))\n for move in [2, 1, 1, 1, 1, 1, 0]:\n env.step(move)\n if move != 0:\n assert env.reward == env.step_reward_for_not_achieving_goal\n assert not env.done\n else:\n assert env.reward == env.reward_for_achieving_goal\n assert env.done", "def reached_goal(self):\n for i in range(self.simulator_.num_agents):\n if rvo_math.abs_sq(self.simulator_.agents_[i].position_ - self.goals_[i]) > self.simulator_.agents_[i].radius_ * self.simulator_.agents_[i].radius_:\n return False\n\n return True", "def move(self, state_prev, state, reward, selected):\n\n if state:\n if self.team_id == 1: # Set correct teams based on team id\n self_team = state['team1']\n other_team = state['team2']\n else:\n self_team = state['team2']\n other_team = state['team1']\n\n if state:\n if self.id == 0: # Special for the goal-keeper\n ai_gk_pass = self.gk_pass(\n other_team['players'], self_team['goal_x'])\n ai_gk_move = self.gk_move(self_team['goal_x'], state['ball'])\n # GK has the ball\n if selected == self.id and state['ball'].ball_stats['player'] == self.id:\n if ai_gk_pass != 'NOTHING':\n return ai_gk_pass\n else:\n return ai_gk_move\n else:\n return ai_gk_move\n\n # Selected player has the ball\n if selected == self.id and state['ball'].ball_stats['player'] == self.id:\n ai_shoot = self.ai_shoot(\n other_team['players'][0], other_team['goal_x'])\n ai_pass = self.ai_pass(\n self_team['players'], other_team['players'])\n # If shot is possible, take it\n if self.pos.dist(P(other_team['goal_x'], H//2)) <= AI_SHOOT_RADIUS and ai_shoot != 'NOTHING':\n return ai_shoot\n # Else, pass if possible (passes towards the enemy goal are prioritized)\n elif ai_pass != 'NOTHING' and random.random() >= AI_PASS_PROB:\n return ai_pass\n else:\n # Move towards the goal\n return self.ai_move_with_ball(other_team['players'], other_team['goal_x'])\n\n else: # Move towards the ball if posssbile, otherwise return to formation\n move = self.ai_move_without_ball(state['ball'])\n if move != 'NOTHING':\n return move\n else:\n return 'FORM' # Special action, not defined in ACT\n else:\n return 'NOTHING' # Otherwise do nothing", "def process_move(player, board):\r\n c = player.__repr__()\r\n print(c, \"'s turn\")\r\n move = player.next_move(board)\r\n board.add_checker(player.checker, move)\r\n print()\r\n print(board)\r\n if board.is_win_for(player.checker):\r\n i = player.num_moves\r\n print(player.__repr__(), \"wins in \", i, \"moves\")\r\n print(\"Congratulations!\")\r\n return True\r\n elif board.is_full() and not board.is_win_for(player.checker):\r\n print(\"It's a tie!\")\r\n return True\r\n else:\r\n return False", "def is_done(self, observations):\n ####################################################################\n # Plan0: init #\n ####################################################################\n # done = False\n # done_reward = 0\n # reward_reached_goal = 2000\n # reward_crashing = -200\n # reward_no_motion_plan = -50\n # reward_joint_range = -150\n\n ####################################################################################\n # Plan1: Reach a point in 3D space (usually right above the target object) #\n # Reward only dependent on distance. 
Nu punishment for crashing or joint_limits #\n ####################################################################################\n done = False\n done_reward = 0\n reward_reached_goal = 100\n reward_crashing = 0\n reward_no_motion_plan = 0\n reward_joint_range = 0\n\n\n # Check if there are invalid collisions\n invalid_collision = self.get_collisions()\n\n # print(\"##################{}: {}\".format(self.moveit_action_feedback.header.seq, self.moveit_action_feedback.status.text))\n if self.moveit_action_feedback.status.text == \"No motion plan found. No execution attempted.\" or \\\n self.moveit_action_feedback.status.text == \"Solution found but controller failed during execution\" or \\\n self.moveit_action_feedback.status.text == \"Motion plan was found but it seems to be invalid (possibly due to postprocessing).Not executing.\":\n\n print(\">>>>>>>>>>>> NO MOTION PLAN!!! <<<<<<<<<<<<<<<\")\n done = True\n done_reward = reward_no_motion_plan\n\n # Successfully reached goal: Contact with at least one contact sensor and there is no invalid contact\n if observations[7] != 0 and observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get two contacts <<<<<<<<<<<<<<<<<<')\n done_reward = reward_reached_goal\n # save state in csv file\n U.append_to_csv(self.csv_success_exp, observations)\n self.success_2_contacts += 1\n print(\"Successful 2 contacts so far: {} attempts\".format(self.success_2_contacts))\n\n if observations[7] != 0 or observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get one contacts <<<<<<<<<<<<<<<<<<')\n self.success_1_contact += 1\n print(\"Successful 1 contact so far: {} attempts\".format(self.success_1_contact))\n\n # Check if the box has been moved compared to the last observation\n target_pos = U.get_target_position()\n if not np.allclose(self.object_position, target_pos, rtol=0.0, atol=0.0001):\n print(\">>>>>>>>>>>>>>>>>>> Target moved <<<<<<<<<<<<<<<<<<<<<<<\")\n done = True\n\n # Crashing with itself, shelf, base\n if invalid_collision:\n done = True\n print('>>>>>>>>>>>>>>>>>>>> crashing <<<<<<<<<<<<<<<<<<<<<<<')\n done_reward = reward_crashing\n\n joint_exceeds_limits = False\n for joint_pos in self.joints_state.position:\n joint_correction = []\n if joint_pos < -math.pi or joint_pos > math.pi:\n joint_exceeds_limits = True\n done = True\n done_reward = reward_joint_range\n print('>>>>>>>>>>>>>>>>>>>> joint exceeds limit <<<<<<<<<<<<<<<<<<<<<<<')\n joint_correction.append(-joint_pos)\n else:\n joint_correction.append(0.0)\n\n if joint_exceeds_limits:\n print(\"is_done: Joints: {}\".format(np.round(self.joints_state.position, decimals=3)))\n self.publisher_to_moveit_object.pub_joints_to_moveit([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n while not self.movement_complete.data:\n pass\n self.publisher_to_moveit_object.pub_relative_joints_to_moveit(joint_correction)\n while not self.movement_complete.data:\n pass\n print('>>>>>>>>>>>>>>>> joint corrected <<<<<<<<<<<<<<<<<')\n\n return done, done_reward, invalid_collision" ]
[ "0.68620497", "0.61089545", "0.60718143", "0.60501325", "0.6016241", "0.6005791", "0.5993357", "0.59653", "0.5931018", "0.591706", "0.5897315", "0.58952403", "0.5891687", "0.58798397", "0.581872", "0.5814012", "0.5781461", "0.5760628", "0.5734404", "0.57288635", "0.57284474", "0.5713007", "0.57122356", "0.5700348", "0.5690386", "0.56677085", "0.565261", "0.5645595", "0.5639456", "0.5636376" ]
0.700028
0
Returns the angle, in radians, between the target and hunter positions
def get_heading(hunter_position, target_position): hunter_x, hunter_y = hunter_position target_x, target_y = target_position heading = atan2(target_y - hunter_y, target_x - hunter_x) heading = angle_trunc(heading) return heading
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle_to(self, target_pos):\n return angle_to(self.tonp(), target_pos.tonp())", "def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360", "def angle(self):\n return math.degrees(math.atan2(self[1], self[0]))", "def angle(self) -> float:\n ...", "def avl_angle(self):\n dif_height = (self.heights[5] - self.heights[7])\n dif_position = (self.positions[0][7] - self.positions[0][5])\n angle = atan(dif_height / dif_position) / 1.5 * 180 / pi\n return angle", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle(self) -> int:", "def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)", "def angle(self):\n return 0", "def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi", "def get_angle(self):\n return self.bot_client.send_command(_Command.GetAngle)", "def getH(self):\n\t\thAngle = (math.atan2(self.y,self.x))/(2*math.pi)\n\t\tif self.y < 0:\n\t\t\thAngle = 1 + hAngle\t\n\t\treturn hAngle", "def lead_angle(target_disp,target_speed,target_angle,bullet_speed):\n\t\"\"\"\t\n\t One can imagine the gun, target and point of \n target collision at some time t forming a triangle\n --o-.-.-.--- St collision of which one side has length St*t where St is\n\t . /' ' ' ' . . . o the target speed, and another has length Sb*t\n\t . /z . . where Sb is the bullet speed. We can eliminate\n\t . . . t by scaling all sides of the triangle equally\n\t . A. . leaving one side St and another Sb. This \n\t . . . Sb triangle can be split into 2 right-angled\n\t . a__ . triangles which share line A. Angle z can then\n\t . / . be calculated and length A found \n\t . . (A = sin(z)/St), and from this angle a can be\n\t -----o----- found (a = arcsin(A/Sb) leading to the\n\t gun\t calculation of the firing angle. \n\t\"\"\"\t\n\t# Check for situations with no solution\n\tif target_speed > bullet_speed:\n\t\t# TODO target being faster than bullet does not necessarily mean no collision\n\t\t# - e.g. 
head on collision\n\t\treturn None\n\tif target_disp[0]==0 and target_disp[1]==0:\n\t\treturn None\n\t\n\t# Find angle to target\n\tang_to_targ = math.atan2(target_disp[1],target_disp[0])\n\t\n\t# Calculate angle\n\treturn math.asin(target_speed/bullet_speed*math.sin(\n\t\t\tang_to_targ-target_angle-math.pi\n\t\t)) + ang_to_targ", "def get_angle(self,tolerance=0.25):\n x, y = self.gamepad.get_axis(0), self.gamepad.get_axis(1)\n if abs(x) > tolerance or abs(y) > tolerance:\n self.angle = 135-math.degrees(math.atan2(float(y), float(x)))\n self.barrel = pg.transform.rotate(self.original_barrel, self.angle)\n self.rect = self.barrel.get_rect(center=self.rect.center)", "def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))", "def delta_angle(source_angle, target_angle, hi=2 * np.pi):\n diff = target_angle - source_angle\n def mod(a, n): return (a % n + n) % n\n return mod(diff + hi / 2, hi) - hi / 2", "def getFinalLarmorAngle(self):\n return np.degrees(self.theta_L_array[-1])", "def __calc_target_angle(self, delta_angle, direction):\n if self.is_reverse:\n direction = not direction\n\n if direction:\n if self.current_angle - delta_angle < 0 or self.current_angle - delta_angle > pi:\n return self.current_angle\n return self.current_angle - delta_angle # this mines (-) for cw.\n else:\n if self.current_angle + delta_angle < 0 or self.current_angle + delta_angle > pi:\n return self.current_angle\n return self.current_angle + delta_angle", "def radians(self) -> float:\n return math.atan2(self.y, self.x)", "def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def angle_in_degrees(x, y):\n return math.atan2(y, x) / math.pi * 180", "def _joint_angle_control(self):\n\n error = self.target_pos - self.robot_arm_pos\n return self._pd_control(error) + self.torque", "def find_allowable_angle(self, dist: float) -> float:\n angle = math.atan(self.TRUE_TARGET_RADIUS / dist)\n # print(f\"angle tolerance +- {angle} true target radius{self.TRUE_TARGET_RADIUS}\")\n return angle", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def direction_angle(self):\n return math.atan2(self.velocity, self.velocity)", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def __calculate_angle(self):\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n rel_x, rel_y = mouse_x - self.x, mouse_y - self.y\r\n angle = (180 / PI) * -atan2(rel_y, rel_x) - 90\r\n self.set_angle(angle)", "def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])" ]
[ "0.70849824", "0.7078092", "0.69498146", "0.68233067", "0.6816026", "0.6716919", "0.6716865", "0.66994107", "0.6624106", "0.66036713", "0.6584936", "0.6542871", "0.651355", "0.6493879", "0.6482985", "0.6479435", "0.64647853", "0.6453982", "0.6453617", "0.6434811", "0.64185977", "0.6382345", "0.6371726", "0.63694406", "0.6366996", "0.6357795", "0.63488466", "0.6284332", "0.627896", "0.6263473" ]
0.7280507
1
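
For illustration, a minimal runnable sketch of the heading helper in the record above. Only get_heading's body appears in that record; the angle_trunc wrapper below is an assumed stand-in that wraps angles into [-pi, pi), and the coordinates in the example are made up.

from math import atan2, pi

def angle_trunc(a):
    # Assumed helper: wrap an angle into [-pi, pi); the record only references its name.
    while a < -pi:
        a += 2 * pi
    while a >= pi:
        a -= 2 * pi
    return a

def get_heading(hunter_position, target_position):
    # Same computation as the record's document field.
    hunter_x, hunter_y = hunter_position
    target_x, target_y = target_position
    return angle_trunc(atan2(target_y - hunter_y, target_x - hunter_x))

# A target to the north-east of the hunter gives a heading of pi/4 radians.
print(get_heading((0.0, 0.0), (1.0, 1.0)))  # ~0.785398
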
Gets the stress_test_number param from user params. If absent, returns default 100.
def get_stress_test_number(self): return int(self.user_params.get("stress_test_number", 100))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError", "def getintparam(name, default=None, stash=None, params=None):\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return int(v[0])\n return default", "def param_num(self, *, include_tp: bool = False, include_gq: bool = False) -> int:\n return self._param_num(self.model, int(include_tp), int(include_gq))", "def get_test_params(cls, parameter_set=\"default\"):\n params = {\"state_dim\": 2}\n return params", "def get_test_params(cls, parameter_set=\"default\"):\n params = {\"state_dim\": 2}\n return params", "def getIntParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n try: return int(value)\n except: return default", "def set_model_parameters(test_type: str, parameter_value: float):\n # assigning default parameters for the model\n som_width = Config.som_width\n som_height = Config.som_height\n n_iter = Config.n_iter\n sigma = Config.sigma\n learning_rate = Config.learning_rate\n\n # assign testing parameter to the model parameter basing on test_parameter value\n if test_type == 'map_size':\n som_width = parameter_value\n som_height = parameter_value\n if test_type == 'n_iter':\n n_iter = parameter_value\n if test_type == 'learning_rate':\n learning_rate = parameter_value / 1000\n if test_type == 'sigma':\n sigma = parameter_value / 100\n return som_width, som_height, n_iter, sigma, learning_rate", "def get_stress(self) -> Union[np.float64, float]:\n\n return self.__stress", "def get_test_params(cls, parameter_set=\"default\"):\n params = {\n \"default_fc_parameters\": \"efficient\",\n \"disable_progressbar\": True,\n \"show_warnings\": False,\n \"fdr_level\": 0.01,\n }\n return params", "def autoset_numerical_parameters():\n testenv = env(\n Ndim=N_DIMS,\n lambda_over_dx=LAMBDA_OVER_DX,\n R_dt=R_DT,\n norm_Poisson=NORM_POISSON,\n Ngrid=N_GRID,\n Nhits=N_HITS,\n dummy=True,\n )\n if STOP_t is None:\n if N_DIMS == 1:\n stop_t = int(round(4 * testenv.N))\n else:\n if testenv.mu0_Poisson < 1e-3:\n stop_t = 10 * testenv.N ** N_DIMS\n elif testenv.mu0_Poisson < 1:\n stop_t = int(round(5 * 10 ** N_DIMS * LAMBDA_OVER_DX / np.sqrt(testenv.mu0_Poisson)))\n else:\n stop_t = int(round(5 * 10 ** N_DIMS * LAMBDA_OVER_DX))\n else:\n stop_t = STOP_t\n\n if N_RUNS is None:\n # predefined for REL_TOL = 0.01\n if N_DIMS == 1:\n Nruns = 16000\n elif N_DIMS == 2:\n Nruns = 6400\n elif N_DIMS == 3:\n Nruns = 25600\n elif N_DIMS == 4:\n Nruns = 102400\n else:\n raise Exception(\"Nruns not pre-defined for N_DIMS > 4\")\n Nruns = int(Nruns * (0.01 / REL_TOL) ** 2)\n else:\n Nruns = N_RUNS\n\n if MAX_N_RUNS is None:\n max_Nruns = MAX_N_RUNS\n else:\n max_Nruns = 10 * Nruns\n\n if ADAPTIVE_N_RUNS or WITH_MPI:\n Nruns = int(N_PARALLEL * (np.ceil(Nruns / N_PARALLEL))) # make it multiple of N_PARALLEL\n max_Nruns = int(N_PARALLEL * (np.ceil(max_Nruns / N_PARALLEL))) # make it multiple of N_PARALLEL\n\n return testenv.N, testenv.Nhits, stop_t, Nruns, max_Nruns, testenv.mu0_Poisson", "def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)", "def _get_ssm_param(self, parameter_name):\n response = self.ssm_client.get_parameter(Name=parameter_name)\n res = response.get(\"Parameter\", {})\n cwa_parameter = res.get(\"Value\", {})\n return cwa_parameter", "def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value 
\n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param", "def testPsychStress(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"stress\")\n\n self.util.intPropertyTest(self, attr, \"stress\")", "def get_setting(param_name, default='None'):\n\n try:\n value = os.environ.get(param_name.upper())\n if value is None:\n value = get_ssm_parameter(param_name.lower())\n # logging.error(f'Param Name: {param_name} not set in env file')\n\n except (IndexError, KeyError) as e:\n logging.warn(e)\n value = default\n\n return value", "def test_invalid_ssm_parameter_with_default():\n param_value = get_ssm_parameter(\n ssm_parameter_key=\"/oops/not/valid\", default=\"NoWorries\"\n )\n assert param_value == \"NoWorries\"", "def get_measurement_parameter(self, trace: int) -> str:\n if trace not in range(1, 5):\n raise ValueError(\"Trace must be between 1 and 4\")\n\n return self.query(f\"CALC:PAR{trace}:DEF?\")", "def getIntParam(self, params, name):\n try:\n return int(params.get(name))\n except:\n return None", "def getParam(key):\n \n if globalParams == {}:\n warning(\"WARNING: runtime parameters not yet initialized\")\n LoadParams(\"_defaults\")\n \n if key in globalParams.keys():\n return globalParams[key]\n else:\n raise ValueError()", "def get_param(self, param):\n return self.params.get(param, None)", "def _get_int_param(request, param):\n try:\n int_param = utils.validate_integer(request.GET[param], param,\n min_value=0)\n except exception.InvalidInput as e:\n raise webob.exc.HTTPBadRequest(explanation=e.format_message())\n return int_param", "def _get_ssm_param_name(self, config_type):\n ssm_config_param_name = \"ray_cloudwatch_{}_config_{}\".format(\n config_type, self.cluster_name)\n return ssm_config_param_name", "def performance_tolerance():\n tolerance = os.environ.get(\"TEST_PERFORMANCE_TOLERANCE\")\n\n if tolerance:\n tolerance = float(tolerance)\n print(f\"Testing performance tolerance multiplier set to: {tolerance}x\")\n else:\n tolerance = 1\n print(\"Testing performance tolerance not set. 
Using the default value: 1.0x\")\n\n return tolerance", "def get_test_params(cls, parameter_set=\"default\"):\n params1 = {}\n params2 = {\"p_threshold\": 0.1, \"regression\": \"ctt\", \"nlags\": 5}\n\n return [params1, params2]", "def get_weight():\n\tparser = argparse.ArgumentParser(description='Script to update google sheet ')\n\tparser.add_argument('-w','--weight', help='Weight',required=True)\n\targs = parser.parse_args()\n\n\tweight = float(args.weight)\t\n\tprint (\"Weight entry is: %s\" % str(weight))\n\tprint 20*'-'\n\treturn weight", "def set_stress(self, stress=None):\n self.status()\n if not stress:\n if self.__cod == 'vasp': \n #getData = VASP()\n getData = vasp.Stress()\n outfile = 'vasprun.xml'\n elif self.__cod == 'espresso':\n getData = espresso.Stress()\n outfile = 'espresso.out'\n for atoms in self.__structures.items():\n \n if not atoms[1].status:\n atoms[1].stress = np.zeros((3,3))\n continue\n #getData.set_outfile('%s/%s/'%atoms[0] + outfile)\n #getData.set_gsEnergy()\n getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile)\n getData.set_stress()\n #atoms[1].gsenergy = getData.get_gsEnergy()\n atoms[1].stress = getData.get_stress()\n \n self.__stress = stress", "def parallelism_per_kpu(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parallelism_per_kpu\")", "def get_performance_threshold(self):\n\n if Test.performance_params: return float(Test.performance_params[0])\n elif self._check_performance: return self._performance_threshold\n else: return None", "def get_check_performance(self):\n\n return self._check_performance or Test.performance_params", "def init_upper(request) -> float:\n return request.param" ]
[ "0.5574652", "0.54855615", "0.54105365", "0.5292212", "0.5292212", "0.52149796", "0.51648545", "0.5153553", "0.51269835", "0.51085144", "0.509756", "0.5092859", "0.50264287", "0.50162494", "0.49809265", "0.49752045", "0.49556142", "0.49453557", "0.49236017", "0.49203673", "0.49111086", "0.49107772", "0.49026483", "0.4902259", "0.48978457", "0.48832744", "0.48782736", "0.48564753", "0.48515007", "0.48491558" ]
0.83593583
0
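
For illustration, the accessor in the record above only coerces an optional user parameter to int, falling back to 100; the FakeTest wrapper and the parameter values below are assumptions made for the example.

class FakeTest:
    # Assumed stand-in for the test class that owns user_params in the record above.
    def __init__(self, user_params):
        self.user_params = user_params

    def get_stress_test_number(self):
        return int(self.user_params.get("stress_test_number", 100))

print(FakeTest({}).get_stress_test_number())                              # 100 (default)
print(FakeTest({"stress_test_number": "250"}).get_stress_test_number())   # 250
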
Given a sequence (let's say from a context window), extract its components under the assumption that each "word" in the sequence is a triplet, and triplets may overlap on the last base
def get_triplet_composition(seq): out = [] for i in range(len(seq)): if i+3 > len(seq): break out.append(seq[i:i+3]) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clts(sequence):\n return [_token2clts(segment)[1] for segment in sequence]", "def bipa(sequence):\n return [_token2clts(segment)[0] for segment in sequence]", "def get_complementary_sequence(sequence):\n\n complementary_sequence = ''\n for char in sequence:\n complementary_sequence = complementary_sequence + get_complement(char)\n return complementary_sequence", "def triplets(p):\n return [p[i:i+3] for i in range(3)]", "def get_ngrams(sequence, n):\n # Example: n = 3, length = 4\n # we want 0:3, 1:4\n\n result = []\n if n == 1:\n result.append(tuple([\"START\"]))\n result.append(tuple([\"STOP\"]))\n\n if n == 3 and len(sequence) == 1:\n result.append(tuple([\"START\",\"START\",sequence[0]]))\n result.append(tuple([\"START\",sequence[0],\"STOP\"]))\n return result\n\n if n > 1 :\n for i in range(n-1):\n result.append(tuple([\"START\"]*(n-1-i)+ sequence[:i+1]))\n result.append(tuple(sequence[-n+1:] + [\"STOP\"]))\n\n for i in range(len(sequence)-n+1):\n result.append(tuple(sequence[i:i+n]))\n return result", "def triples():", "def get_contexts(chords_seq):\n chords , contexts = [], []\n m_before = context_size\n m_after = context_size\n\n copy_chords_seq = copy.deepcopy(chords_seq)\n size = len(chords_seq)\n\n for i in range(size):\n # the neighborhood of chords at the beginning or at the end of a sequence is smaller\n if i < m_before:\n m_before = i\n elif size - i <= m_after:\n m_after = size - i - 1\n\n if (m_before > 0):\n for context in map(list, copy_chords_seq[(i - m_before):i]):\n c_j = copy.deepcopy(list(context))\n #c_j.append(EOS_ID)\n chords.append(list(chords_seq[i]))\n contexts.append(c_j)\n if (m_after > 0):\n\n for context in map(list, chords_seq[(i + 1):(i + m_after + 1)]):\n c_j = copy.deepcopy(list(context))\n #c_j.append(EOS_ID)\n chords.append(list(chords_seq[i]))\n contexts.append(c_j)\n\n m_before = context_size\n m_after = context_size\n\n return (chords,contexts)", "def get_windows_for_sequence(self, sequence, labels):\n extractions = np.array([sequence[i: i + self.window_size, :] for i in \\\n range(0, len(sequence) - self.window_size + 1, self.stride)])\n \n extraction_labels = np.zeros(len(extractions), dtype=np.int) - 1\n\n def get_non_zero_mask(left, right):\n mask = np.logical_and(labels_time - start_time > left,\n end_time - labels_time > right)\n label = np.flatnonzero(mask)\n return label\n\n for index, extraction in enumerate(extractions):\n start_time = extraction[0, self.X_TIME_COLUMN]\n end_time = extraction[-1, self.X_TIME_COLUMN]\n labels_time = labels[:, self.Y_TIME_COLUMN]\n label = get_non_zero_mask(self.left_epsilon, self.right_epsilon)\n if len(label) > 1:\n raise Warning(\"Overlapping labels. 
Reduce Epsilon boundaries\")\n elif len(label) == 1:\n extraction_labels[index] = label[0]\n elif len(label) == 0:\n if not self.only_positive:\n if len(get_non_zero_mask(-self.right_epsilon, -self.left_epsilon)) == 0:\n extraction_labels[index] = 0\n \n extractions = extractions[extraction_labels > -1]\n extraction_labels = extraction_labels[extraction_labels > -1]\n\n return extractions, extraction_labels", "def wc(seq):\n return \"\".join(complement[nt] for nt in reversed(seq))", "def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1", "def get_all_peptides(nuc_seq):\n # TODO - Refactor to use a generator function (in start order)\n # rather than making a list and sorting?\n answer = []\n full_len = len(nuc_seq)\n if options.strand != \"reverse\":\n for frame in range(0, 3):\n for offset, n, t in break_up_frame(nuc_seq[frame:]):\n start = frame + offset # zero based\n answer.append((start, start + len(n), +1, n, t))\n if options.strand != \"forward\":\n rc = reverse_complement(nuc_seq)\n for frame in range(0, 3):\n for offset, n, t in break_up_frame(rc[frame:]):\n start = full_len - frame - offset # zero based\n answer.append((start - len(n), start, -1, n, t))\n answer.sort()\n return answer", "def _is_component(words):\n init_word = words[0]\n words = set(words) # odstrani duplicity\n seen = {init_word, }\n first_ch = {init_word[0], }\n last_ch = {init_word[-1], }\n index = 0\n while index < max(len(first_ch), len(last_ch)):\n for word in words:\n if word[:1] in last_ch or word[-1:] in first_ch:\n first_ch.add(word[:1])\n last_ch.add(word[-1:])\n seen.add(word)\n index += 1\n return len(seen) == len(words)", "def _is_component(words):\n init_word = words[0]\n words = set(words) # odstrani duplicity\n seen = {init_word, }\n first_ch = {init_word[0], }\n last_ch = {init_word[-1], }\n index = 0\n while index < max(len(first_ch), len(last_ch)):\n for word in words:\n if word[:1] in last_ch or word[-1:] in first_ch:\n first_ch.add(word[:1])\n last_ch.add(word[-1:])\n seen.add(word)\n index += 1\n return len(seen) == len(words)", "def get_position_indices(triplet, dna):\n import re\n return list(filter(None, [m.start() / 3 if m.start() is not m.start() % 3 else '' for m in re.finditer(triplet, dna)]))", "def triples(self):\n\n if len(self.words) < 3:\n return\n\n for i in range(len(self.words) - 2):\n yield (self.words[i], self.words[i+1], self.words[i+2])", "def get_combinations(text):\n combinations = []\n arr = []\n slen = len(text)\n __find_factor(slen,slen,combinations,arr)\n \n elements = []\n for comb in combinations:\n tmp = [0] + comb\n elements.append([text[tmp[i]:tmp[i]+tmp[i+1]] for i in range(len(tmp)-1)])\n return elements", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def extract_ngrams(self, sequence):\n sequence = self.prefix + sequence + self.suffix\n for i, event in enumerate(sequence[self.n:], self.n):\n yield event, sequence[i-self.n: i]", "def find_words(text):\n print \"finding combinations\"\n length = len(text)\n n = 
length - 1\n num_combos = 2 ** (length - 1)\n\n bins = []\n for i in range(num_combos):\n num = bin(i).rsplit('b', 1)[1]\n num_str = num.zfill(n)\n bins.append(num_str)\n\n total_combos = []\n for binary_num in bins:\n combo = []\n for i in range(n):\n if binary_num[i] == '1':\n combo.append(text[i])\n combo.append(',')\n else:\n combo.append(text[i])\n\n combo.append(text[-1])\n combo = ''.join(combo)\n combo = combo.split(',')\n total_combos.append(combo)\n\n return total_combos", "def sequence(word1: str, word2: str) -> str:\r\n matrix = [[[0, [0, 0]] for x in range(len(word1) + 1)] for i in range(len(word2) + 1)]\r\n\r\n for i in range(1, len(word2) + 1):\r\n for j in range(1, len(word1) + 1):\r\n # compares every letter in\r\n if word2[i - 1] == word1[j - 1]:\r\n matrix[i][j][0] = 1 + matrix[i-1][j-1][0]\r\n matrix[i][j][1] = [i - 1, j - 1]\r\n else:\r\n if matrix[i - 1][j][0] > matrix[i][j - 1][0]:\r\n matrix[i][j][0] = matrix[i - 1][j][0]\r\n matrix[i][j][1] = [i - 1, j]\r\n else:\r\n matrix[i][j][0] = matrix[i][j - 1][0]\r\n matrix[i][j][1] = [i, j - 1]\r\n # the code below runs in order to extract the sequence. it starts at position (m,n)\r\n res = \"\"\r\n i = len(matrix) - 1\r\n j = len(matrix[0]) - 1\r\n while i and j != 0:\r\n if matrix[i][j][1] == [i - 1, j - 1]:\r\n res = word1[j - 1] + res\r\n i, j = matrix[i][j][1]\r\n return res", "def get_ngrams(sequence, n):\n length = len(sequence)\n #if only require 1-gram, then we need to add one START and one END to the sequence. \n if n==1 or n==2:\n sequence=[\"START\"]*n+sequence+[\"STOP\"]\n end = n+1 #end i means that when n==1, we need to read one more data, that is to the end of sequence, which is slightly different from when n>1.\n #if require multi-grams, use the common calculation below.\n else:\n sequence = [\"START\"]*(n-1)+sequence+[\"STOP\"]\n end = 1\n if n==2:\n end = n\n result = []\n temp = ()\n #the process to construct the tuple-based array.\n for i in range(0,length+end):\n temp = tuple(sequence[i:i+n])\n\n result.append(temp)\n return result", "def convert_clifford_sequence_to_tape(\n clifford_sequence, lutmapping, gate_decomposition=gate_decomposition\n):\n # This is intended to replace the block below but not done because\n # I cannot test it at this moment (MAR)\n # decomposed_seq = decompose_clifford_seq(clifford_sequence,\n # gate_decomposition)\n decomposed_seq = []\n for cl in clifford_sequence:\n decomposed_seq.extend(gate_decomposition[cl])\n tape = []\n for g in decomposed_seq:\n tape.append(lutmapping.index(g))\n return tape", "def C ( self ) :\n lst = [ i for i in self.__nums_components ]\n if not lst : return () ## extended fit? no other components?\n elif 1 == len(lst) : return lst[0] ## single component?\n return tuple ( lst )", "def C ( self ) :\n lst = [ i for i in self.__nums_components ]\n if not lst : return () ## extended fit? 
no other components?\n elif 1 == len(lst) : return lst[0] ## single component?\n return tuple ( lst )", "def get_components(self):\r\n return [Token.from_multiword(word, index, self) for index, word in enumerate(self.wordform.split('_'))]", "def seq2bbox(sequence: np.ndarray) -> np.ndarray:\n sequence = np.asarray(sequence, dtype=np.bool)\n selected_indices, = np.where(sequence == 1)\n\n bboxes_lr = []\n for k, g in groupby(enumerate(selected_indices), lambda x: x[0] - x[1]):\n segment = list(map(itemgetter(1), g))\n start_frame, end_frame = segment[0], segment[-1] + 1\n bboxes_lr.append([start_frame, end_frame])\n\n bboxes_lr = np.asarray(bboxes_lr, dtype=np.int32)\n return bboxes_lr", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def get_full_context(chords_seq):\n chord , context = [],[]\n m_before = context_size\n empty_before = 0\n m_after = context_size\n empty_after = 0\n\n size = len(chords_seq)\n for i in range(size):\n # the neighborhood of chords at the beginning or at the end of a sequence is smaller\n if i < m_before:\n m_before = i\n empty_before = context_size - m_before\n elif size - i <= m_after:\n m_after = size - i - 1\n empty_after = context_size - m_after\n\n neighborhood = []\n\n for j in range(empty_before):\n neighborhood.append([])\n if (m_before > 0):\n neighborhood.extend(map(list, chords_seq[(i - m_before):i]))\n if (m_after > 0):\n neighborhood.extend(map(list, chords_seq[(i + 1):(i + m_after + 1)]))\n for j in range(empty_after):\n neighborhood.append([])\n\n #for context_chord in neighborhood:\n # context_chord.append(EOS_ID)\n\n chord.append(list(chords_seq[i]))\n context.append(neighborhood)\n m_before = context_size\n m_after = context_size\n empty_after = 0\n empty_before = 0\n\n return (chord,context)", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def superstring(g):\n substrings = []\n last_overlap = 0\n i = source(g)\n while True:\n substrings.append(g.vertex_label(i)[last_overlap:])\n if g.outdegree(i) > 0:\n j = g.out_edges(i)[0][1]\n last_overlap = g.edge_weight(i, j)\n i = j\n else:\n break\n return \"\".join(substrings)" ]
[ "0.64476144", "0.5610308", "0.5590001", "0.55161774", "0.5455728", "0.5424268", "0.5388733", "0.5383343", "0.5379006", "0.5371296", "0.53479505", "0.5318797", "0.5318797", "0.52970743", "0.5292962", "0.5259284", "0.52484095", "0.5230913", "0.52165705", "0.52094585", "0.52030563", "0.5198468", "0.5191101", "0.5191101", "0.5188471", "0.5184082", "0.5181438", "0.51584715", "0.5152823", "0.51391125" ]
0.7335
0
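
For illustration, a worked example of the triplet extraction in the record above: the function keeps every length-3 window with stride 1, so consecutive triplets share two bases; the input string is made up.

def get_triplet_composition(seq):
    # Same sliding-window logic as the record's document field.
    out = []
    for i in range(len(seq)):
        if i + 3 > len(seq):
            break
        out.append(seq[i:i + 3])
    return out

print(get_triplet_composition("ATGCA"))  # ['ATG', 'TGC', 'GCA']
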
Opens marker file and adds all markers to a dictionary with marker names as keys.
def open_markers(filename): markers = {} try: with open(filename, "r") as f: lines = f.readlines() cur_marker = "" cur_marker_name = "" for i in range(len(lines)): if i >= 7: cur_line = lines[i] if cur_line.startswith(" "): cur_marker += cur_line.replace(" ", "").strip() else: if i != 7: markers[cur_marker_name] = [cur_marker] cur_marker_name = cur_line.split(" ")[0] cur_marker = "" except IOError: print("Error loading file.") return markers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readMarkers(self,markerfile):\n with open(markerfile,'r') as fin:\n count = 0\n for line in fin:\n if line.startswith('#'): continue\n l = line.strip().split()\n if len(l) == 0: continue\n if len(l) == 6: chrom,name,distance,position,a1,a2 = l\n elif len(l) == 4:\n chrom,name,distance,position = l # Plink\n a1,a2 = [],[]\n elif len(l) == 1:\n name = l[0]\n chrom,pos,a1,a2 = '0',count,[],[]\n if name not in self.mark:\n self.mark[name] = {'chrom':chrom,\n 'pos':int(position),\n 'alleles': a1+a2,\n 'rank':count}\n count += 1\n self.marklist.append(name)", "def collectMarkers(self, ingeno):\n with open(ingeno,'r') as fin:\n for line in fin:\n if line.startswith('#'):\n l = line.strip('#').strip().split()\n for i,e in enumerate(l):\n self.mark[e] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(e)\n break\n else:\n l = line.strip().split()\n if self.ia == 3:\n for i in xrange(0,len(l[self.ic:])//2):\n self.mark[str(i)] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(str(i))\n else:\n for i,e in enumerate(l[self.ic:]):\n self.mark[str(i)] = {'chrom':'0',\n 'pos':i,\n 'alleles': [],\n 'rank':i}\n self.marklist.append(str(i))", "def create_map(filename: str) -> TravelMap:\n travel_map = {}\n for line in open(filename, \"r\"):\n loc1, loc2, dist = parse_line(line)\n add_locs(travel_map, loc1, loc2, dist)\n add_locs(travel_map, loc2, loc1, dist)\n return travel_map", "def Dictionary_create(nMarkers, markerSize):\n pass", "def load_data_map(self):\n with open(\"map/maps.txt\") as maps:\n for x_axis, line in enumerate(maps):\n self.x_axis = x_axis\n self.full_map.insert(x_axis, [])\n for y_axis, case in enumerate(line.strip()):\n self.y_axis = y_axis\n if case == \"D\":\n self.full_map[x_axis].insert(y_axis, \"M\")\n self.user.position = (x_axis, y_axis)\n elif case == \"A\":\n self.full_map[x_axis].insert(y_axis, \"A\")\n elif case == \"_\":\n self.full_map[x_axis].insert(y_axis, \"_\")\n elif case == \"#\":\n self.full_map[x_axis].insert(y_axis, \"#\")", "def custom_dictionary(nMarkers, markerSize):\n pass", "def return_markers(self):\r\n ent_file = join(self.filename, self._basename + '.ent')\r\n if not exists(ent_file):\r\n ent_file = join(self.filename, self._basename + '.ent.old')\r\n\r\n try:\r\n ent_notes = _read_ent(ent_file)\r\n\r\n except (FileNotFoundError, PermissionError):\r\n markers = []\r\n\r\n else:\r\n allnote = []\r\n for n in ent_notes:\r\n try:\r\n n['value'].keys()\r\n allnote.append(n['value'])\r\n except AttributeError:\r\n lg.debug('Note of length {} was not '\r\n 'converted to dict'.format(n['length']))\r\n\r\n s_freq = self._hdr['erd']['sample_freq']\r\n pcname = '0CFEBE72-DA20-4b3a-A8AC-CDD41BFE2F0D'\r\n note_time = []\r\n note_name = []\r\n note_note = []\r\n for n in allnote:\r\n if n['Text'] == 'Analyzed Data Note':\r\n continue\r\n if not n['Text']:\r\n continue\r\n if 'User' not in n['Data'].keys():\r\n continue\r\n user1 = n['Data']['User'] == 'Persyst'\r\n user2 = False # n['Data']['User'] == 'eeg'\r\n user3 = n['Data']['User'] == pcname\r\n user4 = n['Data']['User'] == 'XLSpike - Intracranial'\r\n user5 = n['Data']['User'] == 'XLEvent - Intracranial'\r\n if user1 or user2 or user3 or user4 or user5:\r\n continue\r\n if len(n['Data']['User']) == 0:\r\n note_name.append('-unknown-')\r\n else:\r\n note_name.append(n['Data']['User'].split()[0])\r\n note_time.append(n['Stamp'] / s_freq)\r\n note_note.append(n['Text'])\r\n\r\n markers = []\r\n for time, name, note in zip(note_time, note_name, 
note_note):\r\n m = {'name': note + ' (' + name + ')',\r\n 'start': time,\r\n 'end': time,\r\n 'chan': None,\r\n }\r\n markers.append(m)\r\n\r\n return markers", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def custom_dictionary_from(nMarkers, markerSize, baseDictionary):\n pass", "def extract_data(data, markers, fn, key):\n\n for index, obj in enumerate(markers):\n start = obj['offset'] + 8\n\n # Is this the last marker in the file?\n if index < len(markers) - 1:\n # Extract to start of next marker\n end = markers[index + 1]['offset']\n else:\n # Extract to end of data\n end = None\n\n # Output filename made from input + marker\n name = \"{}-{}\".format(fn, obj['text'])\n\n save_data(data[start:end], name, key)", "def read(self, filename):\n try:\n with open(filename) as f:\n line = f.readline()\n while line:\n charLine = line.strip().split(',')\n l = []\n for c in charLine:\n l.append(CharMapCell(c))\n self.charMap.append(l)\n line = f.readline()\n except FileNotFoundError:\n print(\"[Error] Map not found.\", file=sys.stderr)\n raise UserInputException", "def open_file(self, fname):\n\n # Save that the file is opened.\n self.open_files[fname] = {}\n self.open_files[fname][\"name\"] = fname\n self.open_files[fname][\"contents\"] = []", "def read_ics(self, filename, lat_long_data):\n with open(os.path.join(self.zoneinfo_path, filename), \"r\") as zone:\n zoneinfo = zone.readlines()\n\n with open(os.path.join(self.zoneinfo_pure_path, filename), \"r\") as zone:\n zoneinfo_pure = zone.readlines()\n\n ics_data = []\n for i in range(0, len(zoneinfo)):\n line = zoneinfo[i]\n key = line[:line.find(\":\")]\n\n if key == \"BEGIN\":\n if line != \"BEGIN:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key == \"END\":\n if line != \"END:VCALENDAR\\r\\n\":\n ics_data.append(line)\n elif key in (\"TZID\", \"TZOFFSETFROM\", \"TZOFFSETTO\", \"TZNAME\", \"DTSTART\"):\n ics_data.append(line)\n elif key == \"RRULE\":\n if line == zoneinfo_pure[i]:\n ics_data.append(line)\n else:\n sys.stderr.write(\"Using pure version of %s\\n\" % filename[:-4])\n ics_data.append(zoneinfo_pure[i])\n\n zone_data = {\n \"ics\": \"\".join(ics_data).rstrip()\n }\n zone_name = filename[:-4]\n if zone_name in lat_long_data:\n zone_data[\"latitude\"] = lat_long_data[zone_name][0]\n zone_data[\"longitude\"] = lat_long_data[zone_name][1]\n\n return zone_data", "def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)", "def file_to_dictionary():\n\n return;", "def create_map(data_file):\n\n\t# Define a type of GeoJSON\n\tgeo_map = {\"type\": \"FeatureCollection\"}\n\t# Define list to collect each point to graph\n\titem_list = []\n\n\t# Iterate over our data to create GeoJSON doc\n\tfor index, line in enumerate(data_file):\n\t\t# Skip any zero coordinates\n\t\tif line['X'] == '0' or line['Y'] == '0':\n\t\t\tcontinue\n\t\t# New dict for every iteration\n\t\tdata = {}\n\t\t# Assign line items to json fields\n\t\tdata['type'] = 'Feature'\n\t\tdata['id'] = index\n\t\tdata['properties'] = {'title': line['Category'],\n\t\t 'description': line['Descript'],\n\t\t 'date': line['Date']}\n\t\tdata['geometry'] = {'type': 'Point',\n\t\t 'coordinates': (line['X'], line['Y'])}\n\t\t# Add data dict to our itemlist\n\t\titem_list.append(data)\n\n\t# for each point in our item list we add a point to dict\n\tfor point in item_list:\n\t\tgeo_map.setdefault('features', []).append(point)\n\t# write a file, upload to 
gist.github.com\n\twith open('file_sf.geojson', 'w') as f:\n\t\tf.write(geojson.dumps(geo_map))", "def readFromFile(self, infile):\n\n self.mMapComponent2Object = {}\n self.mMapObject2Component = {}\n\n for line in infile:\n if line[0] == \"#\":\n continue\n\n data = line[:-1].split(\"\\t\")\n\n obj_id, obj_start, obj_end, ncoms, com_type, com_id = data[:6]\n\n if com_type == \"N\":\n continue\n com_start, com_end, orientation = data[6:9]\n\n obj_start, obj_end = int(obj_start) - 1, int(obj_end)\n com_start, com_end = int(com_start) - 1, int(com_end)\n\n orientation = orientation in (\"+\", \"0\", \"na\")\n\n if com_start != 0:\n raise ValueError(\"non zero com_start\")\n\n object = ObjectPosition()\n object.mId = obj_id\n object.start = obj_start\n object.end = obj_end\n object.mOrientation = orientation\n\n self.mMapComponent2Object[com_id] = object", "def __init__(self, fileName):\n self.recordDict = {}\n for line in open(fileName, 'r') :\n sipRecord = json.loads(line)\n self.recordDict[sipRecord['addressOfRecord']] = line", "def LoadMapping(self, fname):\n\n M = [{} for i in range(N_ChanUIDS)]\n\n # Load Map:\n with open(fname, \"r\") as f:\n pass", "def pre_lookup(self, file):\n return {}", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def create_map(self, data_file):\n mapping = []\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n path_to_file = os.path.join(root, DATA_PATH, data_file)\n with open(path_to_file, 'r') as file:\n lines = file.readlines()\n list_array = [x.strip('\\n') for x in lines]\n for line in list_array:\n mapping.append(list(line))\n\n self.map = mapping", "def make_map(filename, datadir):\n items = json.load(open(filename))\n geojson_items = []\n for item in items:\n geojson_items.append(get_linestring(item))\n with open(os.path.join(datadir, 'waze.geojson'), 'w') as outfile:\n geojson.dump(geojson.FeatureCollection(geojson_items), outfile)", "def _new_marker_set(self, markers, key):\n if len(markers.shape) > 2 and markers.shape[2] > 1:\n raise IndexError(\"Markers should be from one frame only\")\n self.markers[key].data = markers\n\n # Remove previous actors from the scene\n for actor in self.markers[key].actors:\n self.parent_window.ren.RemoveActor(actor)\n self.markers[key].actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(markers.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.markers[key].actors.append(vtkActor())\n self.markers[key].actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.markers[key].actors[i])\n\n # Update marker position\n self._update_markers(self.markers[key].data, key)", "def onLoadMarkersButton(self):\n start_time = time.time() \n fileName = self.seedsPath + self.fileNameSeedsLineEdit.text\n \n markupsNode = slicer.mrmlScene.GetFirstNodeByName(\"MarkupsFiducial\")\n if markupsNode == None:\n markupsNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLMarkupsFiducialNode\")\n \n markupsNode.RemoveAllMarkups()\n \n # name, point_ras, label\n markups = self.loadMarkupsFromSeedFile(fileName)\n \n for i in range(len(markups)):\n point_ras = markups[i][1]\n markupsNode.AddFiducial(point_ras[0], point_ras[1], point_ras[2])\n markupsNode.SetNthFiducialLabel(i, markups[i][0])\n markupsNode.SetNthControlPointDescription(i, str(markups[i][2]))\n markupsNode.SetNthMarkupLocked 
(i, False)\n\n loadTime = time.time() - start_time\n logging.info('Markers loaded from ' + fileName + ': ' + str(loadTime) + \" seconds\")", "def read_locations(db, openfile):\n pass", "def loadIdMap(self, filename:str) -> None :\n if(not isinstance(filename,str)):\n raise TypeError(\"filename must be a string but %s was passed\"%str(type(filename)))\n if(not os.path.exists(filename) or not os.path.isfile(filename)):\n raise ValueError(\"invalid filename\")\n\n self.idMap = self.ioutil.loadKeysVals(filename, \";\")", "def __init__(self):\n\n basedir = os.path.dirname(__file__)\n with open(os.path.join(basedir, \"datasets/beneficiary_ownership_markers.txt\")) as fp:\n self.bo_markers = set(map(str.strip, fp))\n\n with open(os.path.join(basedir, \"datasets/beneficiary_ownership_absent_markers.txt\")) as fp:\n self.absent_markers = set(map(str.strip, fp))\n\n with open(os.path.join(basedir, \"datasets/beneficiary_owner_is_founder_markers.txt\")) as fp:\n self.ref_markers = set(map(str.strip, fp))", "def read_maps(path):\n maps = {}\n for file_name in os.listdir(path):\n if file_name.endswith(\".txt\"):\n path_maps = os.path.join(path, file_name)\n name_maps = file_name[:-4].lower() # Remove the .txt - could use\n # split()\n with open(path_maps, \"r\") as files:\n content = files.read()\n maps[name_maps] = content\n\n return maps", "def create_zip_dict() -> dict:\n with open('zip_coordinates.json', 'r') as zip_map:\n return json.loads(zip_map.read())" ]
[ "0.6890485", "0.6529261", "0.59296936", "0.56526536", "0.564885", "0.5543642", "0.5540649", "0.5503927", "0.54962057", "0.5492501", "0.5430475", "0.5408602", "0.5362851", "0.5325515", "0.5323305", "0.532094", "0.52844286", "0.5238507", "0.5233728", "0.52186966", "0.5215181", "0.51880544", "0.51679873", "0.51650673", "0.5160313", "0.51354927", "0.51225245", "0.5108768", "0.5107069", "0.5092738" ]
0.73693407
0
Calculates chisquared values based on amount of a and b in the marker data. Markers with chisquared value > 3.84 are discarded.
def chi_squared(markers):
    new_markers = {}
    for marker in markers:
        line = markers[marker][0]
        a = line.count("a")
        b = line.count("b")
        length = a + b
        expect_a = length / 2
        expect_b = length / 2
        chisq = pow((a - expect_a), 2) / expect_a + pow((b - expect_b), 2) / expect_b
        if chisq <= 3.84:
            new_markers[marker] = markers[marker]
            new_markers[marker].append(chisq)
        else:
            print(f"Marker discarded:\t{marker}\t{chisq}")
    print(f"Amount of markers:\t{len(new_markers)}")
    return new_markers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chisquare_value(self):\n x2 = np.sum((np.absolute(self.observed - self.expected) - (0.5 * self.continuity_correction)) ** 2 /\n self.expected)\n\n return x2", "def f(a):\n b = a * 2\n while b.norm().asscalar() < 1000:\n b = b * 2\n if b.sum().asscalar() > 0:\n c = b\n else:\n c = 100 * b\n return c", "def get_chi2(a, b):\n off = (a - b)**2\n return np.sqrt(np.sum(off)) / b.sum()", "def compute(real_data, synthetic_data):\n f_obs, f_exp = get_frequencies(real_data, synthetic_data)\n if len(f_obs) == len(f_exp) == 1:\n pvalue = 1.0\n else:\n _, pvalue = chisquare(f_obs, f_exp)\n\n return pvalue", "def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals = self.calculate_obs_and_exp()\n for i in range(4):\n if expVals[i] != 0:\n chi += (obsVals[i] - expVals[i])**2 / expVals[i]\n return chi", "def afriedmanchisquare(*args):\r\n k = len(args)\r\n if k < 3:\r\n raise ValueError, '\\nLess than 3 levels. Friedman test not appropriate.\\n'\r\n n = len(args[0])\r\n data = apply(pstats.aabut,args)\r\n data = data.astype(N.float_)\r\n for i in range(len(data)):\r\n data[i] = arankdata(data[i])\r\n ssbn = asum(asum(args,1)**2)\r\n chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)\r\n return chisq, achisqprob(chisq,k-1)", "def calc_chisq(func, xdata, ydata, yerrdata, *args):\n xdata = np.array(xdata)\n ydata = np.array(ydata)\n yerrdata = np.array(yerrdata)\n return np.sum(((ydata - func(xdata, *args)) / yerrdata) ** 2)", "def calculate_chi2(target_data, source_data, q_min, q_max):\n\n matched_source_I = match_scatter_curves(target_data, source_data)\n\n # Get average I for experimental and calculated values over matched q\n # range\n matched_no = len(matched_source_I)\n expt_avg = np.mean(target_data[0:matched_no, 1])\n calc_avg = np.mean(matched_source_I)\n\n # Initial guess of the concentration:\n # ratio of experimental and calculated average intensities\n con = expt_avg / calc_avg\n\n if np.count_nonzero(target_data[:, 1]) == 0:\n print(\"Chi^2 calculations cannot proceed without target data\")\n sys.exit()\n else:\n \n if (target_data.shape[1] > 2) and (\n np.count_nonzero(target_data[:, 2]) != 0):\n \n # Call fortran code to calculate the reduced Chi2\n chi2 = sjp_util.calc_chi2(\n target_data[\n :, 0], target_data[\n :, 1], target_data[\n :, 2], matched_source_I, matched_no, q_min, q_max, con, False)\n \n else:\n #print \"For Chi^2 calculations an error column must be present\"\n #sys.exit()\n # Call fortran code to calculate the Pearson Chi2\n chi2 = sjp_util.calc_pearson(target_data[:,0],\n target_data[:,1],\n matched_source_I,\n matched_no,\n q_min,\n q_max,\n con,\n False)\n \n # 1/con is the scaling factor needed to multiply experimental I values\n # to compare with calculated data\n return chi2, 1.0 / con", "def cdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.zero\n if x > 1:\n return mp.one\n return mp.betainc(a, b, x1=0, x2=x, regularized=True)", "def Calc(self, a, b, size):\n self.eq = lambda x: (60000/((b-a)/size*x+a))\n points = []\n names = [str(self.offset)]\n points.append(0)\n for j in range(1, int(size)):\n points.append(integrate.quad(self.eq,0,j)[0])\n names.append(str(points[-1]+self.offset))\n self.beatstr = ' '.join(names)\n return points", "def herons_formula(a, b, c):\n\tprint(\"sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2\")\n\ts = (a + b + c) / 2\n\treturn math.sqrt(s * (s-a) * (s-b) * (s-c))", "def general_cm_wilson(a, b, digits_of_precision, z_score):\n n = a + b\n return compute_wilson_bounds(a / n, n, 
digits_of_precision, z_score)", "def cal_chi_square(self, info, meta, per_bl_chi2=False, g = None):\n reds = info.get_reds()\n SH = self.shape_waterfall\n chisq = np.zeros(SH)\n weight = np.zeros(SH)\n p1, p2 = self.pol\n if g is None:\n if self.gains.gfit is None:\n self.gains.bandpass_fitting(include_red = True)\n g = self.gains.gfit\n noise = []\n for bl in self.noise.keys():\n i,j = bl\n try: G = np.abs(g[p1][i]*g[p2][j])\n except: continue\n noise.append(self.noise[bl]/(G*G+1e-10))\n noise = np.ma.masked_array(noise, np.zeros((len(noise), len(noise[0]))))\n noise.mask[np.where(noise==0)] = True\n noise = np.mean(noise, axis=0).data\n self.recover_model_vis_waterfall(info, g = g)\n mdl = self.gains.mdl\n data_arr = None\n flag_arr = None\n if self.data_backup:\n data_arr = self.data_backup\n flag_arr = self.flag_backup\n else:\n data_arr = self.data\n flag_arr = self.flag\n for r in reds:\n if len(r) < 5: continue\n bl0 = None\n yij = None\n for bl in r:\n if mdl[self.pol].has_key(bl):\n yij = mdl[self.pol][bl]\n bl0 = bl\n break\n if bl0 is None: continue\n chis = np.zeros(SH)\n wgts = np.zeros(SH)\n for bl in r:\n try:\n di = data_arr[bl][self.pol]\n wi = np.logical_not(flag_arr[bl][self.pol])\n except(KeyError):\n di = data_arr[bl[::-1]][self.pol].conj()\n wi = np.logical_not(flag_arr[bl[::-1]][self.pol])\n i,j = bl\n chis += (np.abs(di/(g[p1][i]*g[p2][j].conj()+1e-10)-yij))**2 * wi / (noise + 1e-10)\n wgts += wi\n iuse = np.where(wgts>1)\n self.chisq_base[bl0] = np.mean(chis[iuse]-(wgts[iuse]-1))\n if per_bl_chi2:\n meta['chisq'+'('+str(bl0[0])+','+str(bl0[1])+')'] = chis\n meta['wgts'+'('+str(bl0[0])+','+str(bl0[1])+')'] = wgts\n chisq += chis\n weight += (wgts - 1)\n meta['chisq'] = chisq * (weight > 1) / (weight + 1e-10)\n meta['flags'] = weight < 2", "def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])", "def chi2(data, fdata, err):\n return sum(((data-fdata)/err)**2)", "def compare_sums_chi(array1, array2):\n return stats.chisquare(array1, array2)", "def fmeasure(B, hits, misses, falses) :\r\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\r\n return x", "def find_c(b):\n return (2*(b**2) - 2000*b + 1000000)/(2000 - 2*b)", "def wrap_chi_sq(gcalc, gobs):\n rw, scale = get_chi_sq(gobs, gcalc)\n return rw, scale", "def chi_squared(actual, predicted):\n perturb = np.vectorize(lambda x: x if x != 0 else config.decision_boundary)\n return np.sum((predicted - actual)**2 / perturb(actual))", "def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff", "def calc_fitness(xi, Y, Yhat, c=2):\n\n p = sum(xi) # Number of selected parameters\n n = len(Y) # Sample size\n numer = ((Y - Yhat) ** 2).sum() / n # Mean square error\n pcn = p * (c / n)\n if pcn >= 1:\n return 1000\n denom = (1 - pcn) ** 2\n theFitness = numer / denom\n 
return theFitness", "def maks2(a, b, c):\n m = a\n if b > m:\n m = b\n if c > m:\n m = c\n \n return m", "def fmeasure(B, hits, misses, falses) :\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\n return x", "def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)", "def lfriedmanchisquare(*args):\r\n k = len(args)\r\n if k < 3:\r\n raise ValueError, 'Less than 3 levels. Friedman test not appropriate.'\r\n n = len(args[0])\r\n data = apply(pstats.abut,tuple(args))\r\n for i in range(len(data)):\r\n data[i] = rankdata(data[i])\r\n ssbn = 0\r\n for i in range(k):\r\n ssbn = ssbn + sum(args[i])**2\r\n chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)\r\n return chisq, chisqprob(chisq,k-1)", "def f3_ab_b(self,a,b,c):\n ans = (7.*(a*a - a*b*c) + (a*a/b)*(a*c - b))*self.g2(a,b,-c)/(a*a + b*b - 2.*a*b*c)\n #print 'b_b:',ans,\n return ans", "def _calculate_a_value(self, bval, nvalue, nyr, cmag, ref_mag):\n\n denominator = np.sum(nyr * np.exp(-bval * (cmag - ref_mag)))\n return nvalue / denominator", "def biphasic_fit_function(x, a, b, c, d, e, f):\n term1 = 1 + (a + (1 - a)/(1 + (x * (10 ** b)) ** c))\n term2 = 1 + (d + (1 - d)/(1 + (x * (10 ** e)) ** f))\n\n biphasic_function = 2 ** (0.5 * (np.log2(term1) + np.log2(term2))) - 1\n return biphasic_function", "def ccw(self, b: PointOrIterable, c: PointOrIterable) -> float:\n try:\n return ((b.x - self.x) * (c.y - self.y)) - ((c.x - self.x) * (b.y - self.y))\n except AttributeError:\n pass\n\n return ((b[0] - self.x) * (c[1] - self.y)) - ((c[0] - self.x) * (b[1] - self.y))" ]
[ "0.56620073", "0.56429327", "0.558835", "0.5531059", "0.546065", "0.5427781", "0.53272873", "0.53119254", "0.527172", "0.52057266", "0.51861894", "0.517939", "0.51749146", "0.5146191", "0.51202285", "0.51165134", "0.50929654", "0.50773597", "0.50755304", "0.5072139", "0.50703454", "0.5067337", "0.5063192", "0.5051771", "0.5049576", "0.50489986", "0.5042139", "0.50391173", "0.5036368", "0.50354546" ]
0.7073684
0
Calculates recombination frequency between all combinations of two markers.
def rec_freq(markers):
    keys = list(markers.keys())
    rf_pairs = {}
    for i in range(len(markers)):
        for j in range(i + 1, len(markers)):
            m1 = markers[keys[i]][0]
            m2 = markers[keys[j]][0]
            tot_len = 0
            score = 0
            if len(m1) != len(m2):
                print("Error, sequences aren't same length.", keys[i], keys[j])
                exit()
            for k in range(len(m1)):
                c1 = m1[k]
                c2 = m2[k]
                if c1 == "-" or c2 == "-":
                    continue
                if c1 != c2:
                    score += 1
                tot_len += 1
            rf = score / tot_len * 100
            rf_pairs[(keys[i], keys[j])] = rf
    return rf_pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derive_count(freq1: typing.List[int], freq2: typing.List[int]) -> int:\n count = 0\n for i in range(26):\n count += min(freq1[i], freq2[i])\n return count", "def joint_frequencies_combo(self, alleles):\n\n representations = [1 << i for i in range(len(alleles))]\n\n intrenal_hap_dict_per_group = {group2: self.build_intrenal_hap_dict(alleles, group2)\n for group2 in self.hap_dict_per_group}\n\n result = {}\n\n for c in representations:\n hap = {group2: internal[c] for group2, internal in intrenal_hap_dict_per_group.items()}\n result[c] = self.effective_joint_frequency(hap)\n\n for C in combinations(representations, 2):\n hap = {group2: internal[C[0]] & internal[C[1]] for group2, internal in intrenal_hap_dict_per_group.items()}\n result[C[0]|C[1]] = self.effective_joint_frequency(hap)\n\n for C in combinations(representations, 3):\n hap = {group2: internal[C[0]] & internal[C[1]] & internal[C[2]]\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[C[0]|C[1]|C[2]] = self.effective_joint_frequency(hap)\n\n for r in range(4,len(alleles)):\n for C in combinations(representations, r):\n hap = {group2: reduce(and_,itemgetter(*C)(internal))\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[sum(C)] = self.effective_joint_frequency(hap)\n\n if len(alleles)>=4:\n hap = {group2: reduce(and_,internal.values())\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[sum(representations)] = self.effective_joint_frequency(hap)\n\n return result", "def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result", "def calculate_2mer_freq(counts_file):\n count_matrix = dict()\n\n with open(counts_file, \"r\", newline=\"\") as handle:\n records = csv.reader(handle, delimiter=\"\\t\")\n next(records)\n for row in records:\n nuc1 = str(row[0])\n nuc2 = str(row[1])\n count = int(row[2])\n\n left = \"x{}\".format(nuc2)\n right = \"{}x\".format(nuc1)\n\n count_matrix.setdefault(nuc1, dict())[left] = count\n count_matrix.setdefault(nuc2, dict())[right] = count\n\n lines = \"\"\n header = \"\"\n for ref, d in count_matrix.items():\n lines += ref\n for other in sorted(d.keys()):\n lines += \"\\t\" + str(d[other])\n lines += \"\\n\"\n header = \"x\\t{}\\n\".format(\"\\t\".join(sorted(d.keys())))\n print(header + lines)", "def same_frequency(num1, num2):\n freqlist1 = list(str(num1))\n freqlist2 = list(str(num2))\n setfreq1 = set(freqlist1)\n setfreq2 = set(freqlist2)\n setdict1 = {}\n setdict2 = {}\n for digit in setfreq1:\n setdict1[digit] = freqlist1.count(digit)\n for digit in setfreq2:\n setdict2[digit] = freqlist2.count(digit)\n return setdict1 == setdict2", "def freq():", "def combined_step_count(intersection_coords, wire_one_map, wire_two_map):\n return wire_one_map[intersection_coords] + wire_two_map[intersection_coords]", "def joint_frequencies_redundant(self, alleles):\n\n internal = {}\n\n for group2, hap_dict in self.hap_dict_per_group.items():\n internal[group2] = {1 << i: reduce(and_,itemgetter(*haplotype)(hap_dict))\n if type(haplotype[0])==tuple else hap_dict[haplotype]\n for i, haplotype in enumerate(alleles)}\n\n representations = [1 << i for i in range(len(alleles))]\n\n result = {}\n\n for c in representations:\n hap = {group2: 
internal[group2][c] for group2 in internal}\n result[c] = self.effective_joint_frequency(hap)\n\n for r in range(1,len(alleles)):\n for C in combinations(representations, r+1):\n hap = {group2: reduce(and_,itemgetter(*C)(internal[group2]))\n for group2 in internal}\n result[sum(C)] = self.effective_joint_frequency(hap)\n\n return result", "def compute_pair_frequencies(text, alphabet_size=256):\n\n curr = text[1:]\n prev = text[:-1]\n pairs = np.stack([prev, curr])\n\n print(\"Counting pairs . . .\")\n (char1, char2), counts = np.unique(pairs, return_counts=True, axis=1)\n print(\"Done.\\n\")\n\n joints = np.zeros((alphabet_size, alphabet_size))\n joints[char1, char2] = counts / np.sum(counts)\n assert np.isclose(np.sum(joints), 1.0)\n\n return joints", "def count_pairs(assignments, v1, v2, M):\n assert v1 != v2\n pairs = assignments[:, v1].astype(np.int32) * M + assignments[:, v2]\n return np.bincount(pairs, minlength=M * M).reshape((M, M))", "def same_frequency(num1, num2):\n return req_counter(str(num1)) == req_counter(str(num2))", "def same_frequency(num1, num2):\n num1 = list(str(num1))\n num2 = list(str(num2))\n\n num1_dic = {num:num1.count(num) for num in num1}\n num2_dic = {num:num2.count(num) for num in num2}\n\n count = 0\n\n\n for key in num1_dic.keys():\n try:\n num2_dic[key]\n if num2_dic[key] == num1_dic[key]:\n count+=1\n except: \n return False\n\n if count == len(num2_dic.keys()):\n return True\n else:\n return False", "def same_frequency(num1, num2):\n count_num1 = {}\n for num in str(num1):\n count_num1[num] = count_num1.get(num, 0) + 1\n\n count_num2 = {}\n for num in str(num2):\n count_num2[num] = count_num2.get(num, 0) + 1\n\n return count_num1 == count_num2", "def freq(self) -> int:", "def marked_pair_counts(sample1, sample2, rbins, period, num_threads,\\\n do_auto, do_cross, marks1, marks2, wfunc, _sample1_is_sample2):\n \n #add ones to weights, so returned value is return 1.0*1.0\n marks1 = np.vstack((marks1,np.ones(len(marks1)))).T\n marks2 = np.vstack((marks2,np.ones(len(marks2)))).T\n \n if do_auto==True:\n D1D1 = marked_npairs(sample1, sample1, rbins,\\\n weights1=marks1, weights2=marks1,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D1D1 = np.diff(D1D1)\n else:\n D1D1=None\n D2D2=None\n \n if _sample1_is_sample2:\n D1D2 = D1D1\n D2D2 = D1D1\n else:\n if do_cross==True:\n D1D2 = marked_npairs(sample1, sample2, rbins,\\\n weights1=marks1, weights2=marks2,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D1D2 = np.diff(D1D2)\n else: D1D2=None\n if do_auto==True:\n D2D2 = marked_npairs(sample2, sample2, rbins,\\\n weights1=marks2, weights2=marks2,\\\n wfunc = wfunc,\\\n period=period, num_threads=num_threads)\n D2D2 = np.diff(D2D2)\n else: D2D2=None\n \n return D1D1, D1D2, D2D2", "def solve_part_two(wire_one_map, wire_two_map):\n return min([combined_step_count(intersection_coords, wire_one_map, wire_two_map) for intersection_coords in find_intersection(wire_one_map, wire_two_map)])", "def prefix_rel_freq(prefix1,prefix2,corpus=EL_corpus):\n try:\n return prefix_count.get(prefix1,0) / prefix_count.get(prefix2,0)\n except ZeroDivisionError, e:\n return 0", "def fn(x):\n if len(x) == len(s): ans.append(x)\n for k, v in freq.items(): \n if v >= 2: \n freq[k] -= 2\n fn(k + x + k)\n freq[k] += 2", "def word_cross_product_phi(t1, t2):\n return Counter([(w1, w2) for w1, w2 in product(t1.leaves(), t2.leaves())])", "def final_frequency(changes: Sequence[int]) -> int:\n return sum(changes)", "def add_freq(self,system,nu = NOTHING):\n if nu 
is NOTHING: #because can't use self.xxx as default \n nu = self.nu\n for i in range(self.n):\n for j in range(i+1,self.n):\n system[self.index(i,j)][self.index(i,j)+1] -= self.interaction(i,j,nu)\n system[self.index(i,j) + 1][self.index(i,j)] += self.interaction(i,j,nu)\n return system", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n \"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output", "def update_frequencies():\n pass", "def freq_counts(self, arrs, lens):\n no_nans = reduce(np.logical_and, [~np.isnan(a) if bn.anynan(a) else np.ones(self.m).astype(bool) for a in arrs])\n combined = reduce(add, [arrs[i][no_nans]*reduce(mul, lens[:i]) for i in range(1, len(arrs))], arrs[0][no_nans])\n return np.bincount(combined.astype(np.int32, copy=False), minlength=reduce(mul, lens)).astype(float)", "def total_occurrences(word1, word2, flag):\n result = 0\n word1_length = len(word1)\n for i in range(word1_length):\n if word1[i] == flag:\n result += 1\n\n word2_length = len(word2)\n for i in range(word2_length):\n if word2[i] == flag:\n result += 1\n\n return result", "def make_frequency_table(x, y, X, Y):\n freq = dict()\n\n for i in range(len(X)):\n freq[X[i]] = [0, 0]\n\n # merging the two to get a matrix\n\n M = np.array([[x[i], y[i]] for i in range(len(x))])\n\n for i in range(len(M)):\n if M[i][1] == Y[0]:\n freq[M[i][0]][0] += 1\n else:\n freq[M[i][0]][1] += 1\n\n return freq", "def countTriplets1(arr, r):\n from collections import 
Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range", "def redshifts2frequencies(z):\r\n return 1420e6 / (z + 1)", "def part_two(forms: str) -> int:\n\n count = 0\n groups = parse_forms(forms)\n for members in groups:\n answers = None\n for member in members:\n if answers is None:\n answers = set(member)\n answers.intersection_update(set(member))\n count += len(answers)\n return count" ]
[ "0.64678943", "0.592945", "0.58793336", "0.5876366", "0.5795837", "0.5719026", "0.5665673", "0.5658172", "0.56559056", "0.56537145", "0.5625105", "0.55932873", "0.5565763", "0.55539304", "0.5537769", "0.55084366", "0.5466326", "0.543985", "0.54315937", "0.5410862", "0.53959036", "0.535486", "0.5342288", "0.53367096", "0.53137827", "0.53047806", "0.5274237", "0.5247627", "0.5244779", "0.52323526" ]
0.64783263
0
Calculates the distances between a list of markers from the first marker.
def calc_distances(marker_list, rf_pairs):
    final_distance = [[marker_list[0], 0]]
    for i in range(1, len(marker_list)):
        cur_markers = [marker_list[i-1], marker_list[i]]
        for rf_pair in rf_pairs:
            if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:
                final_distance.append([cur_markers[1], rf_pairs[rf_pair]])
                break
    return final_distance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_distances(coords: List[Tuple[float, float]]) -> List[Dict]:\n miles = 0\n od = []\n for idx in range(len(coords)):\n if idx == 0:\n continue\n dist = distance(coords[idx], coords[idx - 1]).miles\n miles = miles + dist\n od.append(\n {\n \"start\": coords[idx - 1],\n \"stop\": coords[idx],\n \"distance\": dist,\n \"total\": miles,\n }\n )\n return od", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def _calculate_distance(self, ordered_list):\r\n\r\n total_distance = 0\r\n previous_point = None\r\n for point in ordered_list:\r\n if previous_point is not None:\r\n angle, distance = previous_point.angleAndDistanceTo(point) # geodesic distance in meters\r\n total_distance += distance\r\n previous_point = point\r\n\r\n return total_distance", "def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists", "def distances(self):", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def point_distances(self, params=None):\n if params is None:\n params = self.collocation_points()\n with self.fix_evaluator():\n pts = np.array([self(la) for la in params])\n deltas = np.diff(pts, axis=0)\n distances = norm(deltas, axis=1)\n return distances", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def _calculate_distances(boxes, homography):\n pos_markers = []\n pix_markers = []\n for box in boxes:\n (pt1_w, pt1_h), (pt2_w, pt2_h) = box\n\n pix_marker = ((pt1_w + pt2_w) // 2, max(pt1_h, pt2_h))\n pix_markers.append(pix_marker)\n\n pos_marker = np.array(pix_marker).reshape(\n 1, 1, 2).astype(\"float32\")\n pos_marker = cv2.perspectiveTransform(\n pos_marker, homography).squeeze()\n pos_markers.append(pos_marker)\n\n if len(pos_markers) <= 1:\n return np.array([]), np.array([])\n\n distances = pdist(np.array(pos_markers))\n return pix_markers, distances", "def distances(self):\n self._sort_measurements()\n return self._distances", "def _get_dlon_dlat_km(self):\n self.dlon_km=np.array([])\n self.dlat_km=np.array([])\n for lat in self.lat:\n dist_lon, az, baz = obspy.geodetics.gps2dist_azimuth(lat, 0., lat, self.dlon)\n dist_lat, az, baz = obspy.geodetics.gps2dist_azimuth(lat, 0., lat+self.dlat, 0.)\n self.dlon_km=np.append(self.dlon_km, dist_lon/1000.)\n self.dlat_km=np.append(self.dlat_km, dist_lat/1000.)\n self.dlon_kmArr=(np.tile(self.dlon_km, self.Nlon).reshape(self.Nlon, self.Nlat)).T\n self.dlat_kmArr=(np.tile(self.dlat_km, self.Nlon).reshape(self.Nlon, self.Nlat)).T\n return", "def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances", "def all_distances(coords1, coords2):\r\n c1 = np.array(coords1)\r\n c2 = np.array(coords2)\r\n z = (c1[:, None, :] - c2[None, :, 
:]) ** 2\r\n return np.sum(z, axis=-1) ** 0.5", "def _calculate_distance(self):\n xy = list(zip(self.x, self.y))\n\n dist = [0]\n for i in range(1, len(xy)):\n dist.append(self.distance_between_two_points(xy[i-1], xy[i]))\n\n return np.array(dist).cumsum()", "def total_distance(self):\n distance = 0\n\n for segment in self.data:\n segment_distance = 0\n\n last_lon = None\n last_lat = None\n\n for point in segment:\n current_lon = point[\"lon\"]\n current_lat = point[\"lat\"]\n\n # in case data is missing skip point !\n if current_lon is None or current_lat is None:\n continue\n\n # the first valid element is processed, get distance\n if not (last_lon is None or last_lat is None):\n d = gpx_distance(last_lat, last_lon, current_lat, current_lon)\n segment_distance += d\n\n last_lon = current_lon\n last_lat = current_lat\n\n distance += segment_distance\n\n return distance", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def distance_list(coordinate_list):\n for item1 in coordinate_list:\n L2 = []\n d_list.append(L2)\n for item2 in coordinate_list:\n if item1 != item2:\n distance = math.sqrt((item2[1] - item1[1]) ** 2 \\\n + (item2[2] - item1[2]) ** 2)\n L2.append((item2[0], distance))\n return d_list", "def _distance_to(self, coordinates):\n\n return abs(coordinates[0]) + abs(coordinates[1])", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def calculate_distances(data_point, centroids):\n distances = []\n for centroid_index, centroid_value in enumerate(centroids):\n distances.append(distance(data_point, centroid_value))\n return distances", "def _calc_adjacent_discrepancies(self, l_ts_dists):\n return [abs(ts_dist[1] - l_ts_dists[i + 1][1]) for i, ts_dist in enumerate(l_ts_dists[:-1])]", "def _compute_set_distances(nonzeros_1, nonzeros_2):\n distances = np.zeros(len(nonzeros_1))\n for i, _ in enumerate(distances):\n distances[i] = np.min(\n _norm_along_last_axis(nonzeros_1[i].reshape(1, -1) - nonzeros_2)\n )\n return distances", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists", "def total_distance(points):\n return sum([distance_lat_lon(point, points[index + 1]) for index, point in enumerate(points[:-1])])", "def _location_distances(self, positions) -> torch.Tensor:\n diff = positions[..., None, :, :] - positions[..., None, :]\n distances = torch.norm(diff, dim=3)\n return distances", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance 
= atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def Distance(item0, item1):\n \n SumOfDims = len(item0) + len(item1)\n \n if SumOfDims == 4:\n # 2D points/indices.\n distance = ((item0[0] - item1[0])**2 \\\n + (item0[1] - item1[1])**2)**0.5\n \n return distance\n \n elif SumOfDims == 6:\n # 3D points/indices.\n distance = ((item0[0] - item1[0])**2 \\\n + (item0[1] - item1[1])**2 \\\n + (item0[2] - item1[2])**2)**0.5\n \n return distance\n \n \n else:\n msg = \"The inputs must both be 2D or 3D lists of points/indices.\"\n \n raise Exception(msg)", "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n dists[i, :] = np.sqrt(np.sum(np.square(X[i, :] - self.X_train), axis=1)).transpose()\n return dists" ]
[ "0.6635253", "0.6633621", "0.6571341", "0.6439199", "0.62930983", "0.62800103", "0.61445427", "0.6096219", "0.60502046", "0.5957475", "0.5946086", "0.5938248", "0.5932937", "0.5882029", "0.5876757", "0.5874259", "0.5873279", "0.5872944", "0.58673966", "0.5867383", "0.5850922", "0.5822295", "0.5816458", "0.5814968", "0.5813248", "0.579241", "0.576892", "0.5757098", "0.57376", "0.5737188" ]
0.72004944
0
Should index a batch in the form of a list of (id,url,other_data)
def index_batch(self,batch):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bulk_index(data):\n\n def bulk_api_string(item):\n return f\"{{\\\"index\\\":{{}}\\n{json.dumps(item)}\"\n\n body = '\\n'.join([bulk_api_string(item) for item in data]) + '\\n'\n\n return make_request(\n requests.post,\n url=f\"{connection.hostname}:{connection.port}/{connection.index}/_bulk\",\n headers={'Content-Type': 'application/json'},\n auth=auth,\n data=body\n )", "def _index_group_with_subgroup(self, **kwargs):\n\n log.setLevel(self.log_level)\n # get a list of all the uri to index\n uri_list = kwargs.get('uri_list', self.get_uri_list())\n if not uri_list:\n log.info(\"0 items to index\")\n return\n # results = results[:100]\n # Start processing through uri\n batch_file = os.path.join(CFG.dirs.logs, \"batch_list.txt\")\n # with open(batch_file, \"w\") as fo:\n # fo.write(\"{\")\n log.info(\"'%s' items to index\", len(uri_list))\n self.time_start = datetime.datetime.now()\n batch_size = kwargs.get(\"batch_size\", 12000)\n if len(uri_list) > batch_size:\n batch_end = batch_size\n else:\n batch_end = len(uri_list)\n batch_start = 0\n batch_num = 1\n self.batch_data = {}\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n self.batch_uris = {}\n self.batch_uris[batch_num] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n end = False\n last = False\n final_list = []\n expand_index = kwargs.get(\"expand_index\", True)\n while not end:\n log.debug(\"batch %s: %s-%s\", batch_num, batch_start, batch_end)\n sub_batch = []\n j = 0\n for i in range(batch_start, batch_end):\n # for i, subj in enumerate(uri_list[batch_start:batch_end]):\n qry_size = kwargs.get(\"qry_size\", 1000)\n if j < qry_size:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n if j == qry_size -1 or i == batch_end - 1:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n # with open(batch_file, \"a\") as fo:\n # fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):\n # [item[0].sparql\n # for item in sub_batch]})[1:-1]+\",\\n\")\n if not kwargs.get(\"no_threading\", False):\n th = threading.Thread(name=batch_start + i + 1,\n target=self._index_sub,\n args=(sub_batch,\n i+1,\n batch_num,))\n th.start()\n else:\n self._index_sub(sub_batch, i+1, batch_num)\n j = 0\n final_list += sub_batch\n sub_batch = []\n else:\n j += 1\n log.debug(datetime.datetime.now() - self.time_start)\n if not kwargs.get(\"no_threading\", False):\n main_thread = threading.main_thread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n action_list = []\n for key, items in self.batch_data[batch_num].items():\n if key == 'main':\n es_worker = self.es_worker\n else:\n es_worker = self.other_indexers[key]\n action_list += es_worker.make_action_list(items)\n result = self.es_worker.bulk_save(action_list)\n final_list += self.batch_uris[batch_num]\n self._update_triplestore(result, action_list)\n del action_list\n del self.batch_uris[batch_num]\n del self.batch_data[batch_num]\n try:\n del pyrdf.memorized\n pyrdf.memorized = {}\n except AttributeError:\n pass\n while gc.collect() > 0:\n pass\n # pdb.set_trace()\n batch_end += batch_size\n batch_start += batch_size\n if last:\n end = True\n if len(uri_list) <= batch_size:\n batch_end = len(uri_list)\n last = True\n batch_num += 1\n self.batch_uris[batch_num] = []\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n 
log.debug(datetime.datetime.now() - self.time_start)\n # with open(batch_file, 'rb+') as fo:\n # fo.seek(-2, os.SEEK_END)\n # fo.truncate()\n # # fo.close()\n # fo.write(\"}\".encode())", "def _index_sub(self, uri_list, num, batch_num):\n bname = '%s-%s' % (batch_num, num)\n log.debug(\"batch_num '%s' starting es_json conversion\",\n bname)\n qry_data = get_all_item_data([item[0] for item in uri_list],\n self.tstore_conn,\n rdfclass=self.rdf_class)\n log.debug(\"batch_num '%s-%s' query_complete | count: %s\",\n batch_num,\n num,\n len(qry_data))\n # path = os.path.join(CFG.dirs.cache, \"index_pre\")\n # if not os.path.exists(path):\n # os.makedirs(path)\n # with open(os.path.join(path, bname + \".json\"), \"w\") as fo:\n # fo.write(json.dumps(qry_data))\n data = RdfDataset(qry_data)\n del qry_data\n log.debug(\"batch_num '%s-%s' RdfDataset Loaded\", batch_num, num)\n for value in uri_list:\n try:\n\n self.batch_data[batch_num]['main'].append(\\\n data[value[0]].es_json())\n self.count += 1\n except KeyError:\n pass\n for name, indexer in self.other_indexers.items():\n for item in data.json_qry(\"$.:%s\" % name.pyuri):\n val = item.es_json()\n if val:\n self.batch_data[batch_num][name].append(val)\n self.batch_uris[batch_num].append(item.subject)\n del data\n del uri_list\n log.debug(\"batch_num '%s-%s' converted to es_json\", batch_num, num)", "def __getitem__(self, index):\r\n\r\n # Generate indexes of the batch\r\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\r\n\r\n # Find list of IDs\r\n list_ids_temp = [self.list_IDs[k] for k in indexes]\r\n\r\n # Calls function to load batch of data into memory\r\n X, y = self.__data_generation(list_ids_temp)\r\n\r\n return X, y", "def fetch_report_urls(start, end, batch_size):\n db = db_connect()\n db_ensure_init(db)\n\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n\n cmd = db.execute(\"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\".format(start=start, end=end))\n\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n # print(r)\n log_row = r\n\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))\n\n logwriter.writerow(log_row)\n\n db_insert(db, to_insert)", "def batch(self, request):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/batch\" % self.url_index_name, self.client.timeout, request)", "def __getitem__(self, index):\n #print(\"%d / %d\" %(index, np.floor(len(self.list_IDs) / self.batch_size)))\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_ids_temp = [self.list_ids[k] for k in indexes]\n\n # 
Generate data\n x, y = self.__data_generation(list_ids_temp)\n\n return x, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, Y = self.__data_generation(list_IDs_temp)\n\n return X, Y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def batch(self):\n return self._client.batch()", "def batch_index(self, records_uuids, request_timeout=None):\n LOGGER.info(f\"Starting task `batch_index for {len(records_uuids)} records\")\n return InspireRecordIndexer().bulk_index(records_uuids, request_timeout)", "def batch(self, requests):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/indexes/*/batch\", self.timeout, {\"requests\": requests})", "def ids(self,\n limit=100,\n start=None,\n size=None,\n urls=None,\n acl=None,\n hashes=None,\n file_name=None,\n version=None,\n uploader=None,\n metadata=None,\n ids=None,\n urls_metadata=None,\n negate_params=None):\n with self.session as session:\n query = session.query(IndexRecord)\n\n # Enable joinedload on all relationships so that we won't have to\n # do a bunch of selects when we assemble our response.\n query = query.options(joinedload(IndexRecord.urls).\n joinedload(IndexRecordUrl.url_metadata))\n query = query.options(joinedload(IndexRecord.acl))\n query = query.options(joinedload(IndexRecord.hashes))\n query = query.options(joinedload(IndexRecord.index_metadata))\n query = query.options(joinedload(IndexRecord.aliases))\n\n if start is not None:\n query = query.filter(IndexRecord.did > start)\n\n if size is not None:\n query = query.filter(IndexRecord.size == size)\n\n if file_name is not None:\n query = query.filter(IndexRecord.file_name == file_name)\n\n if version is not None:\n query = query.filter(IndexRecord.version == version)\n\n if uploader is not None:\n query = query.filter(IndexRecord.uploader == uploader)\n\n if urls:\n query = query.join(IndexRecord.urls)\n for u in urls:\n query = query.filter(IndexRecordUrl.url == u)\n\n if acl:\n query = query.join(IndexRecord.acl)\n for u in 
acl:\n query = query.filter(IndexRecordACE.ace == u)\n elif acl == []:\n query = query.filter(IndexRecord.acl == None)\n\n if hashes:\n for h, v in hashes.items():\n sub = session.query(IndexRecordHash.did)\n sub = sub.filter(and_(\n IndexRecordHash.hash_type == h,\n IndexRecordHash.hash_value == v,\n ))\n query = query.filter(IndexRecord.did.in_(sub.subquery()))\n\n if metadata:\n for k, v in metadata.items():\n sub = session.query(IndexRecordMetadata.did)\n sub = sub.filter(\n and_(\n IndexRecordMetadata.key == k,\n IndexRecordMetadata.value == v,\n ))\n query = query.filter(IndexRecord.did.in_(sub.subquery()))\n\n if urls_metadata:\n query = query.join(IndexRecord.urls).join(\n IndexRecordUrl.url_metadata)\n for url_key, url_dict in urls_metadata.items():\n query = query.filter(\n IndexRecordUrlMetadata.url.contains(url_key))\n for k, v in url_dict.items():\n query = query.filter(IndexRecordUrl.url_metadata.any(\n and_(\n IndexRecordUrlMetadata.key == k,\n IndexRecordUrlMetadata.value == v\n )\n ))\n\n if negate_params:\n query = self._negate_filter(session, query, **negate_params)\n\n # joining url metadata will have duplicate results\n # url or acl doesn't have duplicate results for current filter\n # so we don't need to select distinct for these cases\n if urls_metadata or negate_params:\n query = query.distinct(IndexRecord.did)\n\n query = query.order_by(IndexRecord.did)\n\n if ids:\n query = query.filter(IndexRecord.did.in_(ids))\n else:\n # only apply limit when ids is not provided\n query = query.limit(limit)\n\n return [i.to_document_dict() for i in query]", "def __getitem__(self, index):\n # Generate indexes of the batch\n rows = self.metadata_dataframe.iloc[index * self.batch_size:(index + 1) * self.batch_size]\n names = rows['Name']\n\n rng = range(index * self.batch_size, (index + 1) * self.batch_size)\n img_files_temp = [names[k] for k in rng]\n # create batch item list\n img_batch_list = []\n meta_batch_list = []\n y_batch_list = []\n for img_file in img_files_temp:\n # Generate data\n print(\"IMAGE FILE:(\")\n print(img_file)\n img, meta, y = self.__data_generation(img_file)\n img_batch_list.append(img)\n meta_batch_list.append(meta)\n y_batch_list.append(y)\n\n # batch_inputs = (img_batch_list, meta_batch_list)\n # return batch_inputs #, y_batch_list\n return [np.array(img),np.array(meta_batch_list)], np.array(y_batch_list)", "def bulk_get_documents():\n ids = flask.request.json\n if not ids:\n raise UserError(\"No ids provided\")\n if not isinstance(ids, list):\n raise UserError(\"ids is not a list\")\n\n with blueprint.index_driver.session as session:\n # Comment it out to compare against the eager loading option.\n # query = session.query(IndexRecord)\n # query = query.filter(IndexRecord.did.in_(ids)\n\n # Use eager loading.\n query = session.query(IndexRecord)\n query = query.options(\n joinedload(IndexRecord.urls).joinedload(IndexRecordUrl.url_metadata)\n )\n query = query.options(joinedload(IndexRecord.acl))\n query = query.options(joinedload(IndexRecord.authz))\n query = query.options(joinedload(IndexRecord.hashes))\n query = query.options(joinedload(IndexRecord.index_metadata))\n query = query.options(joinedload(IndexRecord.aliases))\n query = query.filter(IndexRecord.did.in_(ids))\n\n docs = [q.to_document_dict() for q in query]\n return flask.Response(json.dumps(docs), 200, mimetype=\"application/json\")", "def test_bulk_index_iterates_docs_only_once(self):\n doc = self._make_doc()\n docs = OneshotIterable([doc])\n self.adapter.bulk_index(docs) # does not raise 
IterableExhaustedError", "def __getitem__(self, index):\n out = super(ImageFromListDataset, self).__getitem__(index)\n out[\"id\"] = self._ids[index]\n return out", "def _load_elastic(self, sqldata):\n inserts = []\n for r in sqldata:\n body = self._preprocess(dict(r.items()))\n if not body:\n continue # Skip if preprocessing returns False\n index_name = self._get_index_name(body['TIME_STAMP'])\n document = {\n \"_index\" : index_name,\n \"_type\" : 'default', # Hardcoded - we only have 1 doctype\n \"_id\" : body[self.seq_field],\n \"_source\" : body\n }\n inserts.append(document)\n\n # update sequence to last item in the results\n self.seq = sqldata[-1][self.seq_field]\n \n # Insert list of documents into elasticsearch\n status = helpers.bulk(self.es, inserts, self.chunk_size)\n self.logger.info(\"Inserted %d chunks into %s\" % (self.chunk_size,\n index_name))\n return status", "def __getitem__(self, index: list) -> (np.array, np.array):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, M = self.__data_generation(list_IDs_temp)\n\n return X, M", "def fetch(index, outfile):\n populate_index(index, outfile=outfile)", "def add(self, batch_size=10000):\n if self.N <= batch_size:\n self.index.add(self.database)\n else:\n [self.index.add(self.database[i:i + batch_size])\n for i in tqdm(range(0, len(self.database), batch_size),\n desc='[index] add')]", "def bulk_index_records(records):\n indexer = RecordIndexer()\n\n click.echo('Bulk indexing {} records...'.format(len(records)))\n indexer.bulk_index([str(r.id) for r in records])\n indexer.process_bulk_queue()\n click.echo('Indexing completed!')", "def update_batch(self, *args, **kwargs):\n pass", "def _getRecordBatch(idList) :\n handle = Entrez.efetch(db = \"nuccore\", rettype = \"gbwithparts\",\n retmode = \"text\", id = \",\".join(idList))\n r = handle.read()\n handle.close()\n return r", "def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")" ]
[ "0.6773513", "0.67076516", "0.647827", "0.6319573", "0.6210303", "0.6172807", "0.61597866", "0.612366", "0.61061996", "0.61003715", "0.61003715", "0.60762066", "0.607391", "0.607391", "0.6039906", "0.59697026", "0.5969402", "0.59238076", "0.5890065", "0.5853608", "0.5772766", "0.57714057", "0.57503253", "0.57475084", "0.57259023", "0.5698992", "0.5697273", "0.5673903", "0.5662908", "0.5628047" ]
0.74032223
0
Test case for add_provisioning_request Add a provisioning request
def test_add_provisioning_request(self):
    body = PortProvisionRequest()
    response = self.client.open('/api/provisioning/port',
                                method='POST',
                                data=json.dumps(body),
                                content_type='application/json')
    self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_provisioning_request():\n if connexion.request.is_json:\n discovery = PortProvisionRequest.from_dict(connexion.request.get_json())\n return discovery.save()", "async def test_create(self):\n expected = {\n 'id': 'id'\n }\n profile = {\n 'name': 'name',\n 'version': 4,\n }\n rsps = respx.post(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles') \\\n .mock(return_value=Response(200, json=expected))\n id = await provisioning_client.create_provisioning_profile(profile)\n assert rsps.calls[0].request.url == f'{PROVISIONING_API_URL}/users/current/provisioning-profiles'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps(profile).encode('utf-8')\n assert id == expected", "def InvocationAddRequest(builder, request):\n return AddRequest(builder, request)", "def test_get_provisioning_request_by_id(self):\n response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_request_added(self):\n data = {\n 'workshop_type': 'swc',\n 'g-recaptcha-response': 'PASSED', # to auto-pass RECAPTCHA\n 'name': 'Harry Potter', 'email': '[email protected]',\n 'affiliation': 'Hogwarts', 'location': 'United Kingdom',\n 'country': 'GB', 'preferred_date': 'soon',\n 'approx_attendees': '20-40',\n 'attendee_domains': [1, 2], # IDs\n 'attendee_domains_other': 'Nonsesology',\n 'attendee_academic_levels': [1, 2], # IDs\n 'attendee_computing_levels': [1, 2], # IDs\n 'cover_travel_accomodation': True,\n 'understand_admin_fee': True,\n 'travel_reimbursement': 'book', 'travel_reimbursement_other': '',\n 'admin_fee_payment': 'self-organized', 'comment': '',\n 'privacy_consent': True,\n }\n rv = self.client.post(reverse('swc_workshop_request'), data,\n follow=True)\n self.assertEqual(rv.status_code, 200)\n content = rv.content.decode('utf-8')\n self.assertNotIn('Fix errors below', content)\n self.assertIn('Thank you for requesting a workshop', content)\n self.assertEqual(EventRequest.objects.all().count(), 1)\n self.assertEqual(EventRequest.objects.all()[0].state, 'p')\n self.assertEqual(len(mail.outbox), 1)\n msg = mail.outbox[0]\n self.assertEqual(\n msg.subject,\n '[SWC] New workshop request: Hogwarts, United Kingdom'\n )", "def test_request_added(self):\n data = {\n 'workshop_type': 'dc',\n 'g-recaptcha-response': 'PASSED', # to auto-pass RECAPTCHA\n 'name': 'Harry Potter', 'email': '[email protected]',\n 'affiliation': 'Hogwarts', 'location': 'United Kingdom',\n 'country': 'GB', 'preferred_date': 'soon',\n 'approx_attendees': '20-40',\n 'attendee_domains': [1, 2], # IDs\n 'attendee_domains_other': 'Nonsesology',\n 'data_types': 'survey', 'data_types_other': '',\n 'attendee_academic_levels': [1, 2], # IDs\n 'attendee_data_analysis_level': [1, 2], # IDs\n 'cover_travel_accomodation': True,\n 'understand_admin_fee': True, 'fee_waiver_request': True,\n 'travel_reimbursement': 'book', 'travel_reimbursement_other': '',\n 'comment': '',\n 'privacy_consent': True,\n }\n rv = self.client.post(reverse('dc_workshop_request'), data,\n follow=True)\n self.assertEqual(rv.status_code, 200)\n content = rv.content.decode('utf-8')\n self.assertNotIn('Fix errors below', content)\n self.assertIn('Thank you for requesting a workshop', content)\n self.assertEqual(EventRequest.objects.all().count(), 1)\n self.assertEqual(EventRequest.objects.all()[0].state, 'p')\n self.assertEqual(len(mail.outbox), 
1)\n msg = mail.outbox[0]\n self.assertEqual(\n msg.subject,\n '[DC] New workshop request: Hogwarts, United Kingdom'\n )", "def __init_request(self, req):\n return defines.ReturnCode.SUCC", "def add_request(self, partition_request):\n pr = partition_request\n self._reqs[pr.topic_name][pr.partition_id] = (pr.offset, pr.max_bytes)", "def create_provisioning(self, name, uri):\n template = {\n 'name': name,\n 'uri': uri\n }\n return self.client.call('SoftLayer_Provisioning_Hook', 'createObject', template)", "def test_enterprise_add_new_pi_to_vmi(self):\n validation = 'enterprise'\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites()\n self._test_add_new_pi_to_vmi(\n proj_obj, fabric_obj, pr_obj, validation)", "def testSetRequest(self):\n self.mgr.sendGoProCommand = Mock()\n value = struct.pack('<HH', 8, 22)\n self.mgr.handlePacket(app_packet.GOPRO_SET_REQUEST, value)\n self.mgr.sendGoProCommand.assert_called_with( 8, (22, 0, 0, 0) )", "def delete_provisioning_request(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n\n if doc:\n print(doc)\n doc.delete()\n return {\"status\": \"deleted\"}\n else:\n return 'Not Found', 404", "async def test_update(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps({'name': 'new name'}).encode('utf-8')", "def testSetExtendedRequest(self):\n self.mgr.sendGoProCommand = Mock()\n value = struct.pack('<HBBBB', 5, 0, 3, 7, 1)\n self.mgr.handlePacket(app_packet.GOPRO_SET_EXTENDED_REQUEST, value)\n self.mgr.sendGoProCommand.assert_called_with(5, (0, 3, 7, 1))", "def create_request(v1):\n #get entered data\n data = request.get_json()\n\n #picking the request attributes\n req_title = data.get(\"request_title\")\n req_desc = data.get(\"request_description\")\n requester_name = \"Gideon\"\n req_id = len(all_requests) +1 # + random.randint(1, 3000)\n\n #validation\n if not req_title:\n return jsonify({\"message\": \"Request has no title\"}), 400\n if not req_desc:\n return jsonify({\"message\": \"Request has no description\"}), 400\n if not requester_name:\n return jsonify({\"message\": \"Request must be issued by a user\"}), 400\n if not req_id:\n return jsonify({\"message\": \"Request has no id\"}), 400\n\n #storing entered request\n new_request = MaintenanceRequest(req_title, req_desc, requester_name, req_id)\n all_requests.append(new_request)\n # new_number_of_requests = len(all_requests)\n\n return jsonify({\n \"message\":\"sucessfully created request\",\n 'request_title':new_request.title,\n \"request_description\":new_request.description,\n \"requester_name\" : new_request.requester_name,\n \"request_id\" : new_request.request_id\n })", "def _create(isim_application: ISIMApplication,\n container_dn: str,\n name: str,\n priority: int,\n description: Optional[str] = None,\n keywords: Optional[str] = None,\n caption: Optional[str] = None,\n available_to_subunits: Optional[bool] = None,\n enabled: Optional[bool] = None,\n membership_type: Optional[str] = None,\n membership_role_dns: Optional[List[str]] = None,\n entitlements: List[Dict] = []) -> IBMResponse:\n data = []\n\n # Get the required SOAP types\n\n # Get the policy type\n policy_type_response = 
isim_application.retrieve_soap_type(soap_service,\n \"ns1:WSProvisioningPolicy\",\n requires_version=requires_version)\n\n # If an error was encountered and ignored, return the IBMResponse object so that Ansible can process it\n if policy_type_response['rc'] != 0:\n return policy_type_response\n policy_type = policy_type_response['data']\n\n # Get the policy membership type\n policy_membership_type_response = isim_application.retrieve_soap_type(soap_service,\n \"ns1:WSProvisioningPolicyMembership\",\n requires_version=requires_version)\n\n # If an error was encountered and ignored, return the IBMResponse object so that Ansible can process it\n if policy_membership_type_response['rc'] != 0:\n return policy_membership_type_response\n policy_membership_type = policy_membership_type_response['data']\n\n # Get the policy entitlement type\n policy_entitlement_type_response = isim_application.retrieve_soap_type(soap_service,\n \"ns1:WSProvisioningPolicyEntitlement\",\n requires_version=requires_version)\n\n # If an error was encountered and ignored, return the IBMResponse object so that Ansible can process it\n if policy_entitlement_type_response['rc'] != 0:\n return policy_entitlement_type_response\n policy_entitlement_type = policy_entitlement_type_response['data']\n\n # Get the service target type\n service_target_type_response = isim_application.retrieve_soap_type(soap_service,\n \"ns1:WSServiceTarget\",\n requires_version=requires_version)\n\n # If an error was encountered and ignored, return the IBMResponse object so that Ansible can process it\n if service_target_type_response['rc'] != 0:\n return service_target_type_response\n service_target_type = service_target_type_response['data']\n\n # Retrieve the container object (the business unit)\n container_response = isimws.isim.container.get(isim_application=isim_application, container_dn=container_dn)\n\n # If an error was encountered and ignored, return the IBMResponse object so that Ansible can process it\n if container_response['rc'] != 0:\n return container_response\n\n container_object = container_response['data']\n data.append(container_object)\n\n # Setup the policy object\n policy_object = _setup_policy_object(\n policy_type=policy_type,\n policy_entitlement_type=policy_entitlement_type,\n service_target_type=service_target_type,\n policy_membership_type=policy_membership_type,\n container_object=container_object,\n name=name,\n priority=priority,\n description=description,\n keywords=keywords,\n caption=caption,\n available_to_subunits=available_to_subunits,\n enabled=enabled,\n membership_type=membership_type,\n membership_role_dns=membership_role_dns,\n entitlements=entitlements\n )\n\n data.append(policy_object)\n\n # Leave the date object empty\n data.append(None)\n\n # Invoke the call\n ret_obj = isim_application.invoke_soap_request(\"Creating a provisioning policy\",\n soap_service,\n \"createPolicy\",\n data,\n requires_version=requires_version)\n return ret_obj", "def assign_request(self, config):\n assign_args = config.request_args[\"assignRequest\"]\n assign_args[\"RequestStatus\"] = \"assigned\"\n json_args = json.dumps(assign_args)\n if isinstance(config.request_names, (newstr, newbytes)):\n config.request_names = [config.request_names]\n for request_name in config.request_names:\n self.logger.info(\"Assigning %s with request args: %s ...\",\n request_name, config.request_args[\"assignRequest\"])\n urn = self.urn_prefix + \"/request/%s\" % request_name\n status, data = self.http_request(\"PUT\", urn, data=json_args,\n 
headers=self.headersBody)\n if status > 216:\n self.logger.error(\"Failed to assign request with status: %s, data: %s\", status, data)\n sys.exit(1)\n data = json.loads(data)\n self.logger.info(data)\n self.logger.info(\"Assign succeeded.\")", "def create_request(self, config):\n self.logger.info(\"Injecting request args:\\n%s ...\", config.request_args[\"createRequest\"])\n json_args = json.dumps(config.request_args[\"createRequest\"])\n urn = self.urn_prefix + \"/request\"\n status, data = self.http_request(\"POST\", urn, data=json_args,\n headers=self.headersBody)\n if status > 216:\n self.logger.error(\"Failed to create request with status: %s, data: %s\", status, data)\n sys.exit(1)\n data = json.loads(data)\n self.logger.info(data)\n request_name = data[\"result\"][0][\"request\"]\n self.approve_request(request_name)\n self.logger.info(\"Create request '%s' succeeded.\", request_name)\n\n config.request_names = request_name\n\n return request_name", "def test_create_namespaced_deployment_request_instantiate(self):\n pass", "def test_build_new_review_request(self):\n repository = self._create_repository()\n review_request = self.create_review_request(repository=repository)\n diffset = self.create_diffset(review_request=review_request)\n diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'\n diffset.save()\n\n config = self._create_config()\n self.integration.enable_integration()\n\n data = self._spy_on_make_request()\n\n review_request.publish(review_request.submitter)\n\n self.assertTrue(TravisAPI._make_request.called)\n\n self.assertEqual(\n data['url'],\n 'https://api.travis-ci.org/repo/mypublicorg%2Fmypublicorgrepo/'\n 'requests')\n\n self.assertEqual(\n data['request']['config']['env']['global'],\n [\n 'REVIEWBOARD_STATUS_UPDATE_ID=1',\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,\n ])\n\n self.assertEqual(data['request']['message'],\n 'Test Summary\\n\\nTest Description')\n self.assertTrue('git fetch --unshallow origin || true'\n in data['request']['config']['before_install'])\n self.assertTrue('git checkout %s' % diffset.base_commit_id\n in data['request']['config']['before_install'])\n self.assertEqual(data['request']['branch'], 'review-requests')", "def test_add_labware_request(well_plate_def: models.LabwareDefinition) -> None:\n request = AddLabwareDefinitionRequest(\n definition=well_plate_def,\n )\n\n assert request.definition == well_plate_def", "def test_sp_add_new_pi_to_vmi(self):\n validation = 'serviceprovider'\n proj_obj, fabric_obj, pr_obj = self._create_prerequisites(\n enterprise_style_flag=False)\n self._test_add_new_pi_to_vmi(\n proj_obj, fabric_obj, pr_obj, validation)", "def test_add_duplicated_profile_requests(self):\n # We change the profile of the original flow request. 
Notice that in test data there is already a flow request\n profile = {\n 'code': 'PROF_002',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n flow_request = {\n 'flow_id': 'f_11111',\n 'profile': profile,\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n\n res = self._add_flow_request(flow_request=flow_request)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.json(), ERRORS_MESSAGE['INVALID_DATA'])", "def test_add_asn(self):\n asn = '123'\n info = self.api.add_asn(asn, tags=['asd'])\n self.assertEqual(info['value'], asn)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_delete_provisioning_request(self):\n response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def put_bucket_request_payment(Bucket=None, RequestPaymentConfiguration=None):\n pass", "def provision(event,context):\n body = json.loads(event['body'])\n try: \n assert 'serial_number' in body\n assert 'device_public_key' in body\n except AssertionError:\n return response(400, \"Missing required parameters.\")\n try:\n pub_key = base64.b64decode(body['device_public_key'])\n assert len(pub_key) == 128\n device_pub_key_bytes = bytearray.fromhex(pub_key.decode('ascii'))\n serial_number = base64.b64decode(body['serial_number'])\n assert len(serial_number) == 18\n assert len(body['device_label']) == 5\n except:\n return response(400, \"Parameters are in the incorrect format.\")\n\n requester_data = event[\"requestContext\"]\n if requester_data[\"authorizer\"][\"claims\"][\"email_verified\"]:\n identity_data = event[\"requestContext\"][\"identity\"]\n print(identity_data)\n ip_address = identity_data[\"sourceIp\"]\n email = requester_data[\"authorizer\"][\"claims\"][\"email\"].lower()\n else:\n return response(400, \"Email not verified.\")\n \n #generate server ECC key pair\n server_private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())\n server_pem_key = server_private_key.private_bytes(\n encoding = serialization.Encoding.PEM,\n format = serialization.PrivateFormat.PKCS8,\n encryption_algorithm = serialization.NoEncryption())\n print(server_pem_key.decode('utf-8'))\n\n server_public_key = server_private_key.public_key()\n server_public_key_bytes = server_public_key.public_bytes(\n encoding = serialization.Encoding.X962,\n format = serialization.PublicFormat.UncompressedPoint)[1:]\n server_public_key_text = server_public_key_bytes.hex().upper()\n print('server_public_key:')\n print(server_public_key_text)\n \n #Hash device public key and server public key\n device_public_key_hash = hashlib.sha256(device_pub_key_bytes).digest()\n server_public_key_hash = hashlib.sha256(server_public_key_bytes).digest()\n\n # Generate a data key associated with the CMK\n # The data key is used to encrypt the file. 
Each file can use its own\n # data key or data keys can be shared among files.\n # Specify either the CMK ID or ARN\n data_key_encrypted, data_key_plaintext = create_data_key(cmk_id)\n if data_key_encrypted is None:\n return False\n print('Created new AWS KMS data key')\n\n \n # Encrypt the file\n f = Fernet(data_key_plaintext)\n server_pem_key_encrypted = f.encrypt(server_pem_key)\n\n #Create a random 16 bytes\n choices = string.ascii_letters + string.digits\n rand_pass = b''\n for i in range(16):\n \trand_pass += bytes(random.choice(choices),'ascii')\n\n #Load Device Public Key and derive shared secret\n device_bytes = b'\\x04' + device_pub_key_bytes\n print('device_bytes:')\n print(device_bytes)\n try:\n device_pub_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(),device_bytes)\n except ValueError:\n return response(400, \"Device Public Key is malformed\")\n shared_secret = server_private_key.exchange(ec.ECDH(),device_pub_key)\n\n #use the first 16 bytes (128 bits) of the shared secret to encrypt the random password\n cipher = Cipher(algorithms.AES(shared_secret[:16]), \n modes.ECB(), \n backend=default_backend())\n encryptor = cipher.encryptor()\n encrypted_rand_pass = encryptor.update(rand_pass) + encryptor.finalize()\n\n #Serialize server private key with password from rand_pass\n server_pem_key_pass = server_private_key.private_bytes(\n encoding = serialization.Encoding.PEM,\n format = serialization.PrivateFormat.PKCS8,\n encryption_algorithm = serialization.BestAvailableEncryption(rand_pass))\n\n\n can_logger_dict = {\n 'id': serial_number.decode(\"utf-8\"), #72 bit unique id from the ATECC608.\n 'device_label': body['device_label'],\n 'device_public_key': body['device_public_key'],\n 'device_public_key_prov_hash':device_public_key_hash.hex().upper()[:10],\n 'server_public_key_prov_hash':server_public_key_hash.hex().upper()[:10],\n 'email': email,\n 'sourceIp':ip_address,\n 'encrypted_data_key': base64.b64encode(data_key_encrypted).decode('utf-8'),\n 'encrypted_server_pem_key': base64.b64encode(server_pem_key_encrypted).decode('utf-8'),\n 'provision_time': datetime.datetime.now().isoformat().split('.')[0]\n #'password_for_testing': rand_pass.decode('ascii') #Will delete after testing\n\n }\n\n #Load the server_public_key, the server_pem_key_pass, and the encrypted_rand_pass\n data_dict = {\n \t'server_public_key': base64.b64encode(server_public_key_bytes).decode('ascii'),\n \t'server_pem_key_pass':base64.b64encode(server_pem_key_pass).decode('ascii'),\n \t'encrypted_rand_pass':base64.b64encode(encrypted_rand_pass).decode('ascii')\n }\n\n dbClient = boto3.resource('dynamodb', region_name='us-east-2')\n table = dbClient.Table(\"CANLoggers\")\n try:\n ret_dict = table.put_item(\n Item = can_logger_dict,\n ConditionExpression = 'attribute_not_exists(id)'\n )\n except:\n return response(400, \"serial number already exists\")\n return response(200, data_dict)", "def test_create_project_request(self):\n pass", "def test_upload_1_generation_shell(self):\n entry = mock.MagicMock()\n device_os = mock.MagicMock()\n families = {\"first_gen\": mock.MagicMock()}\n device_os.families.get.return_value = families\n vendor = mock.MagicMock(get_device_os=mock.MagicMock(return_value=device_os))\n first_gen = families[\"first_gen\"]\n cs_session = mock.MagicMock()\n resource_name = \"test resource name\"\n self.networking_handler._upload_resource = mock.MagicMock(return_value=resource_name)\n\n # act\n self.networking_handler.upload(entry=entry,\n vendor=vendor,\n 
cs_session=cs_session)\n # verify\n self.networking_handler._upload_resource.assert_called_once_with(cs_session=cs_session,\n entry=entry,\n resource_family=first_gen[\"family_name\"],\n resource_model=first_gen[\"model_name\"],\n driver_name=first_gen[\"driver_name\"])", "def get_provisioning_request_by_id(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n if doc:\n return doc\n else:\n return 'Not Found', 404" ]
[ "0.7253028", "0.6238011", "0.61264265", "0.5709561", "0.563654", "0.5516715", "0.54224616", "0.5400144", "0.5395249", "0.53915054", "0.53854346", "0.5357447", "0.534008", "0.53295773", "0.5326414", "0.5318304", "0.53048795", "0.5283525", "0.5275277", "0.52693397", "0.5260181", "0.5256735", "0.5216033", "0.5213095", "0.5181443", "0.5174448", "0.50917935", "0.50571704", "0.5051581", "0.5037238" ]
0.75894356
0
Test case for delete_provisioning_request. Deletes a port provisioning request
def test_delete_provisioning_request(self): response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'), method='DELETE') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_provisioning_request(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n\n if doc:\n print(doc)\n doc.delete()\n return {\"status\": \"deleted\"}\n else:\n return 'Not Found', 404", "async def test_delete(self):\n rsps = respx.delete(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.delete_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def test_add_provisioning_request(self):\n body = PortProvisionRequest()\n response = self.client.open('/api/provisioning/port',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete(self, request, phone):\n attrs = self.flatten_dict(request.POST)\n try:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n np = NumberPlan.objects.get(phone_number=phone, site__name__exact=request.user)\n endpoint.enable=False\n np.status=2\n endpoint.save()\n np.save()\n # TODO add parking\n return rc.DELETED\n except:\n return rc.NOT_HERE", "def test_delete(self):\n query = {\"id\":0}\n result = self.app.delete('/testParaDelete', query_string=query)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, 'ok')", "def delete_provisioning(self, identifier):\n return self.client.call(\"SoftLayer_Provisioning_Hook\", \"deleteObject\", id=identifier)", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_delete_deployment(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def deprovision(project, node, network, nic):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.NETWORK_PARAMETER: network,\n constants.NIC_PARAMETER: nic}\n res = requests.delete(_url + \"deprovision/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def DeleteWaiter(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def deleteRequest( self, requestId ):\n cmd = 
\"DELETE FROM `ProxyDB_Requests` WHERE Id=%s\" % requestId\n return self._update( cmd )", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_deletePorts(self):\n store = Store(filesdir=self.mktemp())\n factory = DummyFactory(store=store)\n deleteTCP = TCPPort(\n store=store, factory=factory, portNumber=10, interface=u\"foo\")\n keepTCP = TCPPort(\n store=store, factory=factory, portNumber=10, interface=u\"bar\")\n deleteSSL = SSLPort(\n store=store, factory=factory, portNumber=10, interface=u\"baz\",\n certificatePath=store.filesdir.child(\"baz\"))\n keepSSL = SSLPort(\n store=store, factory=factory, portNumber=10, interface=u\"quux\",\n certificatePath=store.filesdir.child(\"quux\"))\n deleteEndpoint = StringEndpointPort(\n store=store, factory=factory, description=u'tcp:1234')\n keepEndpoint = StringEndpointPort(\n store=store, factory=factory, description=u'tcp:1235')\n self.assertSuccessStatus(\n self._makeConfig(store),\n [\"delete\",\n \"--port-identifier\", str(deleteTCP.storeID),\n \"--port-identifier\", str(deleteSSL.storeID),\n \"--port-identifier\", str(deleteEndpoint.storeID)])\n self.assertEqual(\"Deleted.\\n\", sys.stdout.getvalue())\n self.assertEqual(list(store.query(TCPPort)), [keepTCP])\n self.assertEqual(list(store.query(SSLPort)), [keepSSL])\n self.assertEqual(list(store.query(StringEndpointPort)), [keepEndpoint])", "def port_delete(switch, port):\n client.port.delete(switch, port)", "def test_client_can_do_delete_request(self):\n response = self.httpbin_4.test_requests_delete_method()\n self.assertEqual(response.request.method, 'DELETE')\n self.assertEqual(response.status_code, 200)", "def delete(self, call, params={}): \n # Build an endpoint using the parameters...\n endpoint = self._calls[call](params)\n url = '{}/{}'.format(str(self), str(endpoint))\n return self.deleter.respond(url)", "def test_DELETE4(self):\n r = requests.delete(self.address + \"/car/\")\n self.assertEqual(r.status_code, 400)", "def delete(self):\n self.log.info('Deleting')\n self._state = PonPort.State.DELETING\n self._cancel_deferred()", "def test_fax_inbound_automation_delete(self):\n pass", "def test_delete_device_template(self):\n pass", "def test_request_do_delete(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n DUT.request_do_insert(revision_id=1, parent_id=0)\r\n\r\n assert not DUT.request_do_delete(DUT.request_last_id())", "def test_get_provisioning_request_by_id(self):\n response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def DeleteInput(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_invalid_verify_delete_request(self, cred):\n resp = requests.delete(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 403", "def delete(clients, context):\n port_id = context['port_id']\n logger.info(\"Taking action port.delete {}\".format(port_id))\n neutron = clients.get_neutron()\n neutron.delete_port(port_id)" ]
[ "0.8210428", "0.6664818", "0.6417803", "0.6305171", "0.6026843", "0.6005614", "0.60023606", "0.5943052", "0.5932378", "0.59317976", "0.59317976", "0.5874001", "0.58608633", "0.57793397", "0.57621557", "0.57589376", "0.57544667", "0.57511425", "0.5744601", "0.5734947", "0.57232344", "0.5723072", "0.56762946", "0.5672648", "0.56664014", "0.56572074", "0.56546706", "0.56175894", "0.5612516", "0.55922055" ]
0.8789912
0
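The DELETE test in the record above only exercises the endpoint through the Flask test client. As a stand-alone illustration, the hedged sketch below issues the same call with the `requests` library against a locally running service; the host, port, and the literal request id are assumptions made for the example, not values taken from the dataset.

```python
import requests

# Hypothetical direct HTTP equivalent of the DELETE test case above.
# The base URL and the request id are assumptions for illustration only.
BASE_URL = "http://localhost:8080/api/provisioning/port"

response = requests.delete(f"{BASE_URL}/requestId_example", timeout=10)

# The original test asserts a 200 response; mirror that check here.
assert response.status_code == 200, response.text
```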
Test case for get_provisioning_request_by_id. Get provisioning request by ID
def test_get_provisioning_request_by_id(self): response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'), method='GET') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_provisioning_request_by_id(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n if doc:\n return doc\n else:\n return 'Not Found', 404", "async def test_retrieve_one(self):\n expected = {\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }\n rsps = respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200, json=expected))\n profile = await provisioning_client.get_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profile == expected", "def fetch_a_request(v1, requestid):\n\n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n returned_request = []\n for a_request in all_requests:\n if a_request.request_id == int(requestid):\n returned_request.append(a_request)\n return jsonify({\n \"message\": \"Successfully fetched the request\",\n \"request\": returned_request[0].__dict__\n })\n \n return jsonify({\n \"message\":\"Request doesnt exist\"\n })", "def get(self, request_id):\n request = RequestModel.select_by_id(request_id)\n if request:\n return request.json(), 200\n return {'message': 'Request not found'}, 404", "def get_request_by_id(request_id):\n db = get_db()\n requests = db.requests\n \n # Check if request_id is valid BSON\n if objectid.ObjectId.is_valid(request_id) is False:\n raise APIException(status_code=400, message='request_id not a valid BSON')\n \n cursor = requests.find({\"_id\": ObjectId(request_id)})\n context = {}\n for document in cursor:\n temp = document\n temp['request_id'] = str(document['_id'])\n temp['mentee_profile'] = get_mentee(document['mentee_id'], with_partners=0)\n temp['mentor_profile'] = get_mentor(document['mentor_id'], with_partners=0)\n del temp['_id']\n del temp['mentor_id']\n del temp['mentee_id']\n context = temp\n \n context['url'] = \"/api/v1/requests/\" + str(request_id) + \"/\"\n return flask.jsonify(**context)", "def get_user_request_by_id(self, id):\n user_request_table = Table('user_request', self.metadata, autoload=True)\n try:\n u = self.session.query(user_request_table).filter(user_request_table.c.id==id).one()\n raw_request = u._asdict()\n user_request = json.loads(DateTimeEncoder().encode(raw_request))\n return user_request\n except Exception as e:\n logger.info(f\"Error retrieving request {id}: {e}\")\n return False", "def test_prefectures_id_get(self):\n pass", "def test_get_device_by_id(self):\n pass", "def test_presenters_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/presenters/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_request(id):\n\n result = ResponseEntity()\n try:\n request = Request.objects.exclude(status=Enum.REQUEST_STATUS.Deleted.value).get(id=id)\n request_entity = convert_Request_to_RequestEntity(request)\n result.success = True\n # result.data= request_entity\n result.data = request_entity.data\n except Exception as e:\n print str(e)\n result.message = str(e)\n result.success = False\n finally:\n return result", "def delete_provisioning_request(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n\n if doc:\n print(doc)\n 
doc.delete()\n return {\"status\": \"deleted\"}\n else:\n return 'Not Found', 404", "def __get_request(self, request_id):\r\n if request_id not in self.__requests:\r\n self.__requests[request_id] = Request(request_id)\r\n return self.__requests[request_id]", "def __get_request(self, request_id):\r\n if request_id not in self.__requests:\r\n self.__requests[request_id] = Request(request_id)\r\n return self.__requests[request_id]", "def test_delete_provisioning_request(self):\n response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_get_payments_by_id(self):\n pass", "def request_idkey(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/idkey\", {}, \"idkey\")\r\n else:\r\n self.send_signed_call(\"private/idkey\", {}, \"idkey\")", "def test_get_device_by_id1(self):\n pass", "def _check_request_id(\n self,\n message: W24TechreadMessage\n ) -> None:\n self.assertEqual(type(message.request_id), UUID)", "async def test_create(self):\n expected = {\n 'id': 'id'\n }\n profile = {\n 'name': 'name',\n 'version': 4,\n }\n rsps = respx.post(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles') \\\n .mock(return_value=Response(200, json=expected))\n id = await provisioning_client.create_provisioning_profile(profile)\n assert rsps.calls[0].request.url == f'{PROVISIONING_API_URL}/users/current/provisioning-profiles'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps(profile).encode('utf-8')\n assert id == expected", "def test_get_case_by_id(self):\n pass", "def test_solareclipses_id_get(self):\n pass", "def test_ProductsDataViewSet_with_get_request_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(\n self.product_id))\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json()['name'])", "def test_drugs_id_get(self):\n pass", "def test_get_certificate_by_id(self):\n self.client.post(\n '/api/v1/certificates', data=json.dumps(new_certificate),\n content_type='application/json',\n headers=self.get_registrar_token())\n response = self.client.get(\n '/api/v1/certificates/1', content_type='application/json',\n headers=self.get_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'],\n 'Certificate retrieved successfully')\n assert response.status_code == 200", "def test_christiandoctrines_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/christiandoctrines/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_installments_id_get(self):\n pass", "def test_add_provisioning_request(self):\n body = PortProvisionRequest()\n response = self.client.open('/api/provisioning/port',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_abbeys_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/abbeys/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def 
test_device_registration_get_method(flask_app, db): # pylint: disable=unused-argument\n request = create_registration(REQUEST_DATA, uuid.uuid4())\n\n api_url = '{api}/{id}'.format(api=DEVICE_REGISTRATION_REQ_API, id=request.id)\n rv = flask_app.get(api_url)\n assert rv.status_code == 200\n data = json.loads(rv.data.decode('utf-8'))\n assert data is not None\n assert data['id'] == request.id", "def test_get_specific_by_id(self):\n token = self.get_token()\n self.client.post('/api/v2/party', data=self.add_party,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n response = self.client.get('/api/v2/party/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json',\n )\n self.assertEqual(response.status_code, 200)" ]
[ "0.750531", "0.6248161", "0.61184204", "0.5848939", "0.577084", "0.57675457", "0.5723609", "0.5676751", "0.5654506", "0.5619345", "0.5601689", "0.55897456", "0.55897456", "0.55712306", "0.5539029", "0.5444288", "0.54233503", "0.54105175", "0.5407941", "0.53651434", "0.5340627", "0.53255904", "0.5325074", "0.53205657", "0.5267571", "0.52579105", "0.52530515", "0.5244939", "0.5238167", "0.52078587" ]
0.8449092
0
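The GET-by-id test above checks only the happy path. To make the expected 200/404 behaviour concrete, here is a hedged, self-contained Flask sketch of such an endpoint; the in-memory `REQUESTS` dict, its field names, and the route shape are assumptions standing in for the real persistence layer, not code from the dataset.

```python
from flask import Flask, jsonify

app = Flask(__name__)

# In-memory stand-in for the real persistence layer; purely an assumption
# so the 200/404 behaviour of the GET-by-id endpoint can be exercised.
REQUESTS = {"requestId_example": {"id": "requestId_example", "port": "eth0"}}

@app.route("/api/provisioning/port/<request_id>")
def get_provisioning_request_by_id(request_id):
    doc = REQUESTS.get(request_id)
    if doc:
        return jsonify(doc), 200
    return "Not Found", 404

if __name__ == "__main__":
    with app.test_client() as client:
        assert client.get("/api/provisioning/port/requestId_example").status_code == 200
        assert client.get("/api/provisioning/port/missing").status_code == 404
```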
Test case for get_requests. List server connectivity requests
def test_get_requests(self): response = self.client.open('/api/provisioning/port', method='GET') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_servers(self):\n response = self.client.open(\n '/v1/servers',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 'list' is not found in the response\")", "def test_connection(self):\n req = requests.get(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.status_code, 200)", "def get_requests():\n return PortProvisionRequest.get()", "def GetServers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getRequestList(self):\n\n result = RequestsDAO().getRequests()\n mapped_result = []\n\n if not result:\n return jsonify(Error=\"NOT FOUND\"), 404\n\n else:\n for r in result:\n mapped_result.append(self.mapToUserRequestDict(r))\n\n return jsonify(TURN=mapped_result), 200", "def test_get_request_output(self):\n pass", "def test_client_can_load_client_page_requests_directly(self):\n\n req = self.httpbin_3.get_request_data('get_my_ip')\n\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['get_my_ip'])\n req = self.httpbin_3.get_request_data('test_requests_patch_method')\n self.assertEqual(req, self.httpbin_3.client[\"homepage\"]['test_requests_patch_method'])\n req = self.httpbin_3.get_request_data('test_requests_delete_method')\n self.assertEqual(req, self.httpbin_3.client[\"second_page\"]['test_requests_delete_method'])\n\n req = self.httpbin_4.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_4.client['get_my_ip'])\n req = self.httpbin_4.get_request_data('get_user_my_agent')\n self.assertEqual(req, self.httpbin_4.client['get_user_my_agent'])\n req = self.httpbin_4.get_request_data('test_requests_put_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_put_method'])\n req = self.httpbin_4.get_request_data('test_requests_post_method')\n self.assertEqual(req, self.httpbin_4.client['test_requests_post_method'])", "def list_requests(self, src_rse, dst_rse, request_states):\n path = '/'.join([self.REQUEST_BASEURL, 'list']) + '?' 
+ '&'.join(['src_rse={}'.format(src_rse), 'dst_rse={}'.format(\n dst_rse), 'request_states={}'.format(request_states)])\n url = build_url(choice(self.list_hosts), path=path)\n r = self._send_request(url, type_='GET')\n\n if r.status_code == codes.ok:\n return self._load_json_data(r)\n else:\n exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)\n raise exc_cls(exc_msg)", "def queryRequest(self):\n endpoint = \"/foo\"\n full_path = \"/foo?one=1&two=2\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking query request method.\")\n o(request.url).equals(full_path)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint, one=1, two=2)\n self.testServer.respond()", "def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()", "def fetch_requests(v1):\n \n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n return jsonify({\n \"message\":\"Successfully fetched requests\",\n \"requests\":[\n a_request.__dict__ for a_request in all_requests\n ]\n })\n return jsonify({\"message\":\"Can not fetch requests now\"})", "def on_get(self, req, resp):\n try:\n n_reqs = int(req.params.get('n', self.default_reqs))\n except ValueError:\n error_response(resp, 'ERROR: Incorrect number of requests')\n return\n\n urls = self.scheduler.requests(n_reqs)\n resp.data = json.dumps(urls, ensure_ascii=True)\n resp.content_type = \"application/json\"\n resp.status = falcon.HTTP_200", "def serviceRequests(self):\n for k in self.sessions:\n request = self.sessions[k].ongoingRequest\n client = self.sessions[k].clientInstance\n\n if request and request[0] == \"PULL\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(client, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n del self.outgoingBuffer[request[1]]\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH\":\n # Create a copy of your FSIC and sends it to client\n localFSIC = self.calcFSIC(request[2])\n # PUSH2 request : (\"PUSH2\", pushID, filter, localFSIC)\n self.send(client, k, (\"PUSH2\", request[1], request[2], localFSIC))\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH2\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(self.sessions[k].serverInstance, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n self.sessions[k].ongoingRequest = None\n\n elif request:\n raise ValueError('Invalid Request!')", "def queryAllRequests(self):\n logging.info(\"Querying all requests at ReqMgr instance ...\")\n r = self.reqMgrService.getRequestNames()\n print \"Found %s requests:\" % len(r)\n for req in r:\n print req", "def test_list_clients(self):\n pass", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n 
\"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def list_servers(self, request, paginate):\n raise NotImplementedError", "def test_request(nsproxy, server):\n server = run_agent('server', base=server)\n active = run_agent('active_client', base=Client)\n passive = run_agent('passive_client', base=Client)\n\n # Connect clients\n server_addr = server.addr('publish')\n active_addr = active.connect(server_addr, alias='sub',\n handler=append_received)\n passive_addr = passive.connect(server_addr, alias='sub',\n handler=append_received)\n assert active_addr == server_addr.twin()\n assert passive_addr == server_addr.twin()\n\n # Publish from server\n server.each(0, 'publish')\n\n # Wait for clients to receive some data\n N = 10\n assert wait_agent_attr(active, length=N)\n assert wait_agent_attr(passive, length=N)\n\n # Send request from active client\n active.send('sub', 'request!', handler=receive_negate)\n\n # Server request processing\n assert wait_agent_attr(server, length=1)\n received = server.get_attr('received')\n assert len(received) == 1\n assert received[0][1] == 'request!'\n instant = received[0][0]\n\n # Make sure active gets response\n response = instant + 0.5\n assert wait_agent_attr(active, data=-response)\n\n # Wait for at least another message after the response\n N = len(active.get_attr('received')) + 1\n assert wait_agent_attr(active, length=N)\n\n # Check received messages are properly sorted\n received = active.get_attr('received')\n index = received.index(-response)\n assert received[index - 1] + 1 == received[index + 1]\n received.remove(-response)\n assert received == list(range(received[0], received[-1] + 1))\n\n # Check passive client received data\n assert wait_agent_attr(passive, data=received[-1])\n received = passive.get_attr('received')\n assert -response not in received\n assert received == list(range(received[0], received[-1] + 1))", "def test_request(nsproxy, server):\n server = run_agent('server', base=server)\n active = run_agent('active_client', base=Client)\n passive = run_agent('passive_client', base=Client)\n\n # Connect clients\n server_addr = server.addr('publish')\n active_addr = active.connect(\n server_addr, alias='sub', handler=append_received\n )\n passive_addr = passive.connect(\n server_addr, alias='sub', handler=append_received\n )\n assert active_addr == server_addr.twin()\n assert passive_addr == server_addr.twin()\n\n # Publish from server\n server.each(0, 'publish')\n\n # Wait for clients to receive some data\n n = 10\n assert wait_agent_attr(active, length=n)\n assert wait_agent_attr(passive, length=n)\n\n # Send request from active client\n active.send('sub', 'request!', handler=receive_negate)\n\n # Server request processing\n assert wait_agent_attr(server, length=1)\n received = server.get_attr('received')\n assert len(received) == 1\n assert received[0][1] == 'request!'\n instant = received[0][0]\n\n # Make sure active gets response\n response = instant + 0.5\n assert wait_agent_attr(active, data=-response)\n\n # Wait for at least another message after the response\n n = len(active.get_attr('received')) + 1\n assert wait_agent_attr(active, length=n)\n\n # Check received messages are properly sorted\n received = active.get_attr('received')\n index = received.index(-response)\n assert received[index - 1] + 1 == received[index + 1]\n received.remove(-response)\n assert received == list(range(received[0], received[-1] + 1))\n\n # Check 
passive client received data\n assert wait_agent_attr(passive, data=received[-1])\n received = passive.get_attr('received')\n assert -response not in received\n assert received == list(range(received[0], received[-1] + 1))", "def test_all_servers_connection():\n task_data = dict(const.TEST_TASK)\n task_data[\"client_list\"] = list()\n agents = models.Agent.objects.all()\n for agent in agents:\n task_data[\"client_list\"].append({\"id\": agent.id, \"ip_address\": agent.ip_address})\n message_queue.push_task(task_data)\n logger.info(\"create tasks to test all agents' connection status\")", "def test_get_server_list(self):\n self.assertEqual(sorted(self.checkredis.get_server_list(\"php\",\"qa\")), sorted(['aw1-php70-qa', 'aw1-php80-qa']))", "def test_request_ok(self, method, m_requests):\n # Dummy values for the K8s API request.\n url = 'http://examples.com/'\n client = k8s.requests.Session()\n headers = {\"some\": \"headers\"}\n payload = {\"some\": \"payload\"}\n response = {\"some\": \"response\"}\n\n # Verify the makeup of the actual request.\n def additional_matcher(req):\n assert req.method == method\n assert req.url == url\n assert req.json() == payload\n assert req.headers[\"some\"] == headers[\"some\"]\n assert req.timeout == 30\n return True\n\n # Assign a random HTTP status code.\n status_code = random.randint(100, 510)\n m_requests.request(\n method,\n url,\n json=response,\n status_code=status_code,\n additional_matcher=additional_matcher,\n )\n\n # Verify that the function makes the correct request and returns the\n # expected result and HTTP status code.\n ret = k8s.request(client, method, url, payload, headers)\n assert ret == (response, status_code)", "def test_client_can_load_client_requests_directly(self):\n\n req = self.httpbin.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin.client['get_my_ip'])\n req = self.httpbin.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin.client['get_my_headers'])\n\n req = self.httpbin_2.get_request_data('get_my_ip')\n self.assertEqual(req, self.httpbin_2.client['get_my_ip'])\n req = self.httpbin_2.get_request_data('get_my_headers')\n self.assertEqual(req, self.httpbin_2.client['get_my_headers'])", "def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?", "def test_client_list(self):\n pass", "def retrieve_requests(self, request=None):\n data = {}\n if request:\n data = request.dict()\n req = requests.put('{}/retrieve'.format(self._get_url()),\n params={'type': 'requests'}, data=json.dumps(data))\n if req.status_code == 200:\n try:\n return req.json()\n except ValueError:\n return []\n return []", "def test_requests(self):\n\n text_data = \"\"\"\n BERNARDO: Who's there?\n FRANCISCO: Nay, answer me: stand, and unfold yourself.\n BERNARDO: Long live the king!\n FRANCISCO: Bernardo?\n BERNARDO: He.\n FRANCISCO: You come most carefully upon your hour.\n BERNARDO: 'Tis now struck twelve; get thee to bed, Francisco.\n \"\"\"\n binary_data = text_data.encode()\n file_content = {\"/foo.txt\": binary_data}\n\n # Test all possible combinations of:\n # - whether or not the server supports compression\n # - whether or not the server supports random access\n # - chosen buffering policy\n for allow_gzip in (False, True):\n for allow_range in (False, True):\n with DummyHTTPServer(\n 
file_content=file_content,\n allow_gzip=allow_gzip,\n allow_range=allow_range,\n ) as server:\n url = server.url(\"/foo.txt\")\n for buffering in (-2, -1, 0, 20):\n self._test_text(url, text_data, buffering)\n self._test_binary(url, binary_data, buffering)", "def test_get_servers(self):\n self.assertIsInstance(network.get_servers(), dict)" ]
[ "0.6687752", "0.6559068", "0.64146256", "0.62880325", "0.62294745", "0.61838835", "0.6169326", "0.61663264", "0.6153799", "0.6117643", "0.6065051", "0.6059946", "0.6022924", "0.60025907", "0.5994368", "0.5990659", "0.5976886", "0.59657145", "0.5963672", "0.5962847", "0.5951642", "0.59215456", "0.59190387", "0.5905062", "0.5890795", "0.5889514", "0.58841836", "0.588336", "0.587437", "0.58592105" ]
0.6894288
0
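The three provisioning-port test cases in this part of the dataset differ only in HTTP method and path. As a design note rather than part of the original data, the hedged sketch below shows how such cases are often collapsed into one parametrized pytest function; the `client` fixture, the listed paths, and the empty POST payload are assumptions for illustration.

```python
import json
import pytest

# Hypothetical consolidation of the provisioning-port tests into a single
# parametrized case. `client` is assumed to be a Flask/connexion test client
# provided by a fixture; paths and the empty POST body are assumptions.
CASES = [
    ("GET", "/api/provisioning/port", None),
    ("GET", "/api/provisioning/port/requestId_example", None),
    ("DELETE", "/api/provisioning/port/requestId_example", None),
    ("POST", "/api/provisioning/port", {}),
]

@pytest.mark.parametrize("method,path,body", CASES)
def test_provisioning_endpoints(client, method, path, body):
    kwargs = {"method": method}
    if body is not None:
        kwargs["data"] = json.dumps(body)
        kwargs["content_type"] = "application/json"
    response = client.open(path, **kwargs)
    assert response.status_code == 200, response.data.decode("utf-8")
```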
Get mean/std and optional min/max of scalar x across MPI processes.
def statistics_scalar(x, with_min_and_max=False): x = np.array(x, dtype=np.float32) global_sum, global_n = np.sum(x), len(x) mean = global_sum / global_n global_sum_sq = np.sum((x - mean) ** 2) std = np.sqrt(global_sum_sq / global_n) # compute global std if with_min_and_max: global_min = np.min(x) if len(x) > 0 else np.inf global_max = np.max(x) if len(x) > 0 else -np.inf return mean, std, global_min, global_max return mean, std
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_mean_and_log_std(self, x):\n mean = self._mean_module(x)\n return mean, self._log_std", "def xminmax ( self ) :\n return self.xvar.minmax()", "def statistics_from_array(x: numpy.ndarray):\n try:\n return x.mean(), x.std(), x.max(), x.min()\n except AttributeError:\n return numpy.nan, numpy.nan, numpy.nan, numpy.nan", "def standardise(x):\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n std_x = np.std(x, axis=0)\n x = x / std_x\n return x, mean_x, std_x", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x,axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x,axis=0)\n x = x / std_x\n return x, mean_x, std_x", "def compute_mean_std(x):\n x = np.hstack(x)\n return (np.mean(x).astype(np.float32),\n np.std(x).astype(np.float32))", "def batch_stat(x):\n\tmean = torch.mean(x, dim=[0, 2, 3], keepdim=True)\n\tvar = torch.mean((x-mean)**2, dim=[0, 2, 3], keepdim=True)\n\treturn mean, var", "def getXmeanstd(self):\n if not self._frozen:\n raise Exception(\"Dataset must be frozen\")\n return (self._X_mean,self._X_std)", "def _compute_instance_moments(x):\n return torch.mean(x, dim=(2, 3), keepdim=True), torch.var(x, dim=(2, 3), keepdim=True)", "def summaryone(x):\n print 'mean and std are ',np.mean(x), np.std(x)\n print 'max and min are ',np.max(x), np.min(x)\n print 'the range is ',np.max(x)-np.min(x)", "def basic_stats_builtin(x):\n return {\"Minimum: \": min(x), \"Maximum: \": max(x), \"Sum: \": sum(x),\n \"Mean: \": statistics.mean(x)}", "def get_minmax_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max())\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())", "def mpi_std(data):\n m = mpi_mean(data)\n data_centered = data - m\n data_centered **= 2\n std_local = data_centered.sum(0)\n std = np.empty_like(std_local)\n mpi.COMM.Allreduce(std_local, std)\n num_data = mpi.COMM.allreduce(data.shape[0])\n std /= float(num_data)\n return std", "def _compute_batch_moments(x):\n return torch.mean(x, dim=(0, 2, 3), keepdim=True), torch.var(x, dim=(0, 2, 3), keepdim=True)", "def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std", "def sample_mean_var_ml(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], 0\n s = 0.0\n ss = 0.0\n for i in x:\n s += i\n ss += i*i\n mu = s/n\n var = (ss/n) - mu*mu\n return mu, var", "def basic_stats_custom(x):\n def my_min(x):\n m = x[0]\n for elem in x:\n if elem < m:\n m = elem\n else:\n pass\n return m\n\n def my_max(x):\n m = x[0]\n for elem in x:\n if elem > m:\n m = elem\n else:\n pass\n return m\n\n def my_sum(x):\n acc = 0\n for elem in x:\n acc += elem\n return acc\n\n def my_len(x):\n acc = 0\n for elem in x:\n acc += 1\n return acc\n\n def my_mean(x):\n \"\"\"Return the mean of numbers in list. 
Since the task is not to use\n builtin functions, length is also defined anew (prior to this\n function definition).\n\n \"\"\"\n return my_sum(x) / my_len(x)\n return {\"Minimum: \": my_min(x), \"Maximum: \": my_max(x), \"Sum: \":\n my_sum(x), \"Mean: \": my_mean(x)}", "def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def simple_moments(x):\n\n mean = x.mean()\n std = x.std()\n sterr = std / np.sqrt(len(x))\n\n return mean, std, sterr", "def _get_mean_and_log_std(self, *inputs):\n return self._mean_module(*inputs), self._log_std_module(*inputs)", "def std(X,trimming=0):\n \n if trimming==0:\n s = np.power(np.var(X,axis=0),.5)\n s = np.array(s).reshape(-1)\n else: \n var = sps.trim_mean(np.square(X - sps.trim_mean(X,trimming,0)),\n trimming,0)\n s = np.sqrt(var) \n return s", "def std(x):\n return sqrt(TinyStatistician.var(x))", "def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())", "def rms_f(self, x):\n # TODO: the axis used in nanmean is different for U and Uf\n # calcs - change Uf dims to make consistent?\n return stats.nanstd(x, axis=1)", "def min_max_normalization(x, min_x = None, max_x = None):\n if min_x is None:\n min_x = np.min(x, axis=0)\n if max_x is None:\n max_x = np.max(x, axis=0)\n return (x - (min_x)) / (max_x - min_x), min_x, max_x", "def __call__(self, x: np.ndarray):\n out_fst = self.fst(x)\n out_snd = self.snd(x)\n diff_max = [np.max(np.abs(y_fst - y_snd))\n for y_fst, y_snd in zip(out_fst, out_snd)]\n self.max = np.concatenate([self.max, [diff_max]], axis=0)\n diff_mean = [np.mean(np.abs(y_fst - y_snd))\n for y_fst, y_snd in zip(out_fst, out_snd)]\n self.mean = np.concatenate([self.mean, [diff_mean]], axis=0)", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print(\"==> Computing mean and std..\")\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def _get_mean_and_log_std(self, *inputs):\n return self._shared_mean_log_std_network(*inputs)", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var" ]
[ "0.6554827", "0.6272912", "0.61888236", "0.61567163", "0.6144852", "0.6117041", "0.6077917", "0.60632807", "0.60149676", "0.59918183", "0.5950541", "0.5950353", "0.59191823", "0.59150267", "0.5868738", "0.58552974", "0.58083874", "0.5788024", "0.5781067", "0.5763779", "0.5750993", "0.5729949", "0.5721407", "0.5696449", "0.56748885", "0.5655104", "0.56352496", "0.56135875", "0.55833936", "0.5561103" ]
0.6846514
0
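The `statistics_scalar` helper in the record above returns either two or four values depending on `with_min_and_max`; note also that, despite the docstring's mention of MPI, the implementation shown operates only on the local array. A small hedged usage sketch follows; the sample data and printed labels are assumptions for illustration.

```python
import numpy as np

# Hypothetical call site for the statistics_scalar helper shown above.
# The episode returns below are made-up sample data.
episode_returns = np.array([1.0, 2.5, 3.0, 0.5], dtype=np.float32)

mean, std = statistics_scalar(episode_returns)
mean, std, lo, hi = statistics_scalar(episode_returns, with_min_and_max=True)

print(f"mean={mean:.3f} std={std:.3f} min={lo:.3f} max={hi:.3f}")
```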
parse a kallisto abundance.tsv file, return dict transcriptId -> est_tpm. Does not return a value for transcripts where est_tpm is 0
def parseKallisto(fname): logging.debug("parsing %s" % fname) ifh = open(fname) ifh.readline() d = {} for line in ifh: fs = line.rstrip("\n").split("\t") if fs[tpmColumnIndex]=="0" and not addZeros: continue d[fs[0]] = float(fs[tpmColumnIndex]) return d
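The `parseKallisto` document above relies on two module-level settings, `tpmColumnIndex` and `addZeros`, that are not shown in this record. The hedged sketch below shows one way it might be driven when those names live in the same module; the column index, flag value, and file name are assumptions, based on the fact that a standard kallisto abundance.tsv has the columns target_id, length, eff_length, est_counts, tpm.

```python
import logging

# Assumed module-level settings read by parseKallisto (same module in this
# sketch). In a standard kallisto abundance.tsv the columns are
#   target_id  length  eff_length  est_counts  tpm
# so the TPM column sits at index 4.
tpmColumnIndex = 4
addZeros = False

logging.basicConfig(level=logging.DEBUG)

# Hypothetical call: the file path is an assumption for illustration.
tpmByTranscript = parseKallisto("abundance.tsv")
print(len(tpmByTranscript), "transcripts with non-zero TPM")
```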
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n elements = line.strip('\\n').split('\\t')\r\n\r\n # special handling for the first line only\r\n if i == 0:\r\n # validating if the file has a header\r\n if elements[0] == '':\r\n for otu_id in elements[1:]:\r\n otu_ids.append(non_alphanum_mask.sub('_', otu_id))\r\n continue\r\n else:\r\n for j, otu_id in enumerate(elements[1:]):\r\n otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n\r\n # handling of all other lines\r\n current_row = []\r\n\r\n # converting each value in the row to int\r\n for count in elements[1:]:\r\n try:\r\n current_row.append(int(round(float(count), 0)))\r\n except ValueError:\r\n current_row.append(0)\r\n\r\n # if the sum of all the values is equial to 0 ignore line\r\n if sum(current_row) == 0:\r\n continue\r\n\r\n # adding sample header to list\r\n sample_ids.append(non_alphanum_mask.sub('.',\r\n dash_space_mask.sub('.', elements[0])))\r\n\r\n # validating the size of the headers to add missing columns\r\n # this is only valid when there is no header\r\n if len(current_row) > len(otu_ids):\r\n # modify header data\r\n extra_cols = []\r\n for j in range(len(otu_ids), len(current_row)):\r\n extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n # modify data\r\n for j in range(len(data)):\r\n data[j].extend([0] * (len(current_row) - len(otu_ids)))\r\n\r\n otu_ids.extend(extra_cols)\r\n elif len(current_row) < len(otu_ids):\r\n # modify data\r\n current_row.extend([0] * (len(otu_ids) - len(current_row)))\r\n\r\n data.append(current_row)\r\n\r\n return sample_ids, otu_ids, asarray(data).transpose()", "def parse_translation(transl):\n t_table = {}\n with open(transl, 'r') as f:\n for line in f:\n tmp = line.strip().split('\\t')\n fun_id = tmp[2]\n gene_name = tmp[0]\n t_table[fun_id] = gene_name\n return t_table", "def parse_theta_results(fname):\n with open(fname) as handle:\n header = next(handle).rstrip().split('\\t')\n body = next(handle).rstrip().split('\\t')\n assert len(body) == len(header) == 4\n\n # NLL\n nll = float(body[0])\n\n # mu\n mu = body[1].split(',')\n mu_normal = float(mu[0])\n mu_tumors = list(map(float, mu[1:]))\n\n # C\n copies = body[2].split(':')\n if len(mu_tumors) == 1:\n # 1D array of integers\n # Replace X with None for \"missing\"\n copies = [[int(c) if c.isdigit() else None\n for c in copies]]\n else:\n # List of lists of integer-or-None (usu. 
2 x #segments)\n copies = [[int(c) if c.isdigit() else None\n for c in subcop]\n for subcop in zip(*[c.split(',') for c in copies])]\n\n # p*\n probs = body[3].split(',')\n if len(mu_tumors) == 1:\n # 1D array of floats, or None for \"X\" (missing/unknown)\n probs = [float(p) if not p.isalpha() else None\n for p in probs]\n else:\n probs = [[float(p) if not p.isalpha() else None\n for p in subprob]\n for subprob in zip(*[p.split(',') for p in probs])]\n return {\"NLL\": nll,\n \"mu_normal\": mu_normal,\n \"mu_tumors\": mu_tumors,\n \"C\": copies,\n \"p*\": probs}", "def read_target_file(filename, tdic, hasGT):\n passFirstLine=True\n with open(filename, 'r') as fh:\n if hasGT:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip('\\n').split(',')\n tdic[tmp[0]] = tmp[1]\n else:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip('\\n').split(',')\n tdic[tmp[0]] = -1", "def parse_lineage(tsv_filename, sample_names, allow_missing=True):\n\n samples = {}\n\n if file_is_missing(tsv_filename, allow_missing):\n for name in sample_names:\n samples[name] = { 'lineage' : None,\n 'clade': None,\n 'pangolin_ver': None,\n 'pangodata_ver': None,\n 'nextclade_ver': None }\n return { 'samples': samples }\n\n lineages = pd.read_table(tsv_filename, sep='\\t')\n try:\n df = lineages[['isolate',\n 'pango_lineage',\n 'nextstrain_clade',\n 'pangolin_version',\n 'pangoLEARN_version',\n 'nextclade_version'\n ]]\n except KeyError:\n df = lineages[['isolate',\n 'pango_lineage',\n 'nextstrain_clade',\n 'pangolin_version',\n 'version',\n 'nextclade_version'\n ]]\n\n # Pull each row, identify sid \n for row in df.itertuples():\n if row.isolate.startswith(\"Consensus\"):\n sid = re.findall(\"_(.*?)\\.\", row.isolate)[0]\n else:\n sid = str(row.isolate)\n\n assert sid in sample_names\n\n # Pull Pangolin lineage\n lineage = str(row.pango_lineage)\n clade = str(row.nextstrain_clade)\n pangolin = str(row.pangolin_version)\n try:\n pangodata = str(row.pangoLEARN_version)\n except AttributeError:\n pangodata = str(row.version)\n nextclade = str(row.nextclade_version)\n samples[sid] = { 'lineage' : lineage,\n 'clade': clade,\n 'pangolin_ver': pangolin,\n 'pangodata_ver': pangodata,\n 'nextclade_ver': nextclade }\n\n assert len(samples) == len(sample_names)\n return { 'samples': samples }", "def test_parse_hgts_rangerdtl(self):\n with open(self.rangerdtl_output_hgt_fp, 'r') as f:\n output = parse_hgts(f, 'ranger-dtl')\n self.assertEqual(int(output), 1)", "def test_parse_hgts_trex(self):\n with open(self.trex_output_hgt_fp, 'r') as f:\n output = parse_hgts(f, 'trex')\n self.assertEqual(int(output), 1)", "def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n 
mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ 
record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)", "def get_taxa(taxa_fname, sample_ids_kept=None):\r\n # future: pass in open file object instead\r\n taxa_f = open(taxa_fname, 'U')\r\n\r\n sample_ids, otu_ids, otu_table, lineages =\\\r\n parse_otu_table(taxa_f, count_map_f=float, remove_empty_rows=True)\r\n if sample_ids_kept:\r\n sam_idxs = [sample_ids.index(sam) for sam in sample_ids_kept]\r\n otu_table = otu_table[:, sam_idxs]\r\n return otu_ids, otu_table", "def parse_perturbation_file(pert_file_path, perturbation_timepoint,perturbation_duration):\n\n perturbations_list = []\n\n if (pert_file_path != None):\n df = pd.read_csv(pert_file_path, sep = \"\\t\")\n\n headers_list = list(df)\n\n for index, row in df.iterrows():\n\n a_perturbation = {\"start\":perturbation_timepoint,\\\n \"end\":perturbation_timepoint + perturbation_duration}\n\n required_headers_checker = {\"params\" : False, \"values\" : False,\n \"update_mode\" : False, \"axes\" : False}\n\n for header in headers_list:\n\n header_lowercase = header.lower()\n\n if header_lowercase in (\"parameter\", \"parameters\", \"param\",\\\n \"params\"):\n required_headers_checker[\"params\"] = True\n params = row[header].split(\",\")\n\n elif header_lowercase in (\"value\", \"values\", \"val\", \"vals\"):\n required_headers_checker[\"values\"] = True\n values = str(row[header]).split(\",\")\n\n elif header_lowercase in (\"update_mode\", \"update_modes\",\\\n \"update mode\", \"update modes\"):\n required_headers_checker[\"update_mode\"] = True\n update_mode = row[header]\n\n elif header_lowercase in (\"axes\", \"axis\"):\n required_headers_checker[\"axes\"] = True\n axes = 
row[header].split(\",\")\n\n else:\n raise ValueError(\"Could not identify header name in \" + \\\n \"perturbations file\")\n\n missing_headers_error_message = \"\"\n for each_checker in required_headers_checker:\n if required_headers_checker[each_checker] == False:\n missing_headers_error_message += each_checker + \" \"\n if missing_headers_error_message != \"\":\n missing_headers_error_message = \"Missing the following \" +\\\n \"header(s): \" + missing_headers_error_message\n raise ValueError(missing_headers_error_message)\n\n if len(params) != len(values):\n raise ValueError(\"Number of parameters does not match the \" + \\\n \"number of values\")\n\n a_perturbation[\"params\"] = {}\n for idx, single_param in enumerate(params):\n a_perturbation[\"params\"][single_param] = float(values[idx])\n a_perturbation[\"update_mode\"] = update_mode\n a_perturbation[\"axes\"] = axes\n\n perturbations_list.append(a_perturbation)\n\n else:\n set_xyz_lambda_zero = {\"start\":perturbation_timepoint,\\\n \"end\":perturbation_timepoint + perturbation_duration,\\\n \"params\":{\"lambda\":0.000},\"update_mode\":\"replace\",\"axes\":[\"x\",\"y\",\"z\"]}\n\n perturbations_list.append(set_xyz_lambda_zero)\n\n return perturbations_list", "def test_parse_hgts_riatahgt(self):\n with open(self.riatahgt_output_hgt_fp, 'r') as f:\n output = parse_hgts(f, 'riata-hgt')\n self.assertEqual(int(output), 1)", "def parse_trex(input_f):\n\tstring = \"hgt : number of HGT(s) found = \"\n\tout_str = False\n\tfor line in input_f:\n\t\tif string in line:\n\t\t\tnumber_hgts = line.split(string)[1].strip()\n\t\t\tsys.stdout.write(number_hgts)\n\t\t\tout_str = True\n\tif not out_str:\n\t\tsys.stdout.write(\"NaN\")", "def parse_transcripts(transcript_lines):\n LOG.info(\"Parsing transcripts\")\n transcripts = parse_ensembl_transcripts(transcript_lines)\n\n # Since there can be multiple lines with information about the same transcript\n # we store transcript information in a dictionary for now\n parsed_transcripts = {}\n # Loop over the parsed transcripts\n for tx in transcripts:\n tx_id = tx[\"ensembl_transcript_id\"]\n ens_gene_id = tx[\"ensembl_gene_id\"]\n\n # Check if the transcript has been added\n # If not, create a new transcript\n if not tx_id in parsed_transcripts:\n tx_info = {\n \"chrom\": tx[\"chrom\"],\n \"transcript_start\": tx[\"transcript_start\"],\n \"transcript_end\": tx[\"transcript_end\"],\n \"mrna\": set(),\n \"mrna_predicted\": set(),\n \"nc_rna\": set(),\n \"ensembl_gene_id\": ens_gene_id,\n \"ensembl_transcript_id\": tx_id,\n }\n parsed_transcripts[tx_id] = tx_info\n\n tx_info = parsed_transcripts[tx_id]\n # Add the ref seq information\n if tx.get(\"refseq_mrna_predicted\"):\n tx_info[\"mrna_predicted\"].add(tx[\"refseq_mrna_predicted\"])\n if tx.get(\"refseq_mrna\"):\n tx_info[\"mrna\"].add(tx[\"refseq_mrna\"])\n if tx.get(\"refseq_ncrna\"):\n tx_info[\"nc_rna\"].add(tx[\"refseq_ncrna\"])\n\n return parsed_transcripts", "def read_maf(path, tumor_type):\n # figure out whether there is a comment line\n with open(path) as handle:\n first_line = next(handle)\n skip_rows = 1 if first_line.startswith('#') else 0\n\n # read in data frame\n df = pd.read_csv(path, sep='\\t', skiprows=skip_rows)\n\n # drop duplicate mutations\n #df['Tumor_Sample_Barcode_short'] = df['Tumor_Sample_Barcode'].str[:12]\n df['Tumor_Sample_Barcode_short'] = df['Tumor_Sample_Barcode'].apply(fix_samp_id)\n dup_cols = ['Tumor_Sample_Barcode_short', 'Hugo_Symbol', 'Chromosome',\n 'Start_Position', 'End_Position', 'Reference_Allele',\n 
'Tumor_Seq_Allele2']\n df = df.drop_duplicates(dup_cols)\n df = df.rename(columns={'amino_acid_change': 'HGVSp_Short'})\n\n #####################\n # filter hypermutated samples based on definition\n # from Kandoth et al.\n #####################\n if opts['no_stratify']:\n strat_col = None\n else:\n strat_col = 'tumor_type'\n hypermut_samps, num_mut_list = mu.detect_hypermutators(opts['maf'],\n samp_colname='Tumor_Sample_Barcode',\n stratify_col=strat_col,\n mut_threshold=opts['mut_threshold'])\n df = df[~df['Tumor_Sample_Barcode'].isin(hypermut_samps)].copy()\n\n # keep only missense mutations\n df_mis = df[df['Variant_Classification']=='Missense_Mutation'].copy()\n\n # make variant ID column\n df_mis['ID'] = range(len(df_mis))\n df_mis['ID'] = tumor_type + df_mis['ID'].astype(str).copy()\n\n # fill in other variants with na's\n is_not_snv = (df_mis['Reference_Allele']=='-') | (df_mis['Tumor_Seq_Allele2']=='-')\n #df_mis.loc[df_mis['Variant_Type']!='SNP', 'HGVSp_Short'] = np.nan\n df_mis.loc[is_not_snv, 'HGVSp_Short'] = np.nan\n\n # get the mutation info\n df_mis['Reference Codon Position'] = df_mis['HGVSp_Short'].str[3:-1].copy()\n\n # fix small number of errors in HGVS syntax\n has_letter = df_mis['Reference Codon Position'].str.contains('[A-Za-z]').fillna(True)\n df_mis.loc[has_letter, 'Reference Codon Position'] = '-1'\n is_empty = df_mis['Reference Codon Position']==''\n df_mis.loc[is_empty, 'Reference Codon Position'] = '-1'\n\n # add mut info columns\n df_mis['Reference Codon Position'] = df_mis['Reference Codon Position'].astype(int).copy()\n df_mis['Reference AA'] = df_mis['HGVSp_Short'].str[2:3].copy()\n df_mis['Alternate AA'] = df_mis['HGVSp_Short'].str[-1].copy()\n\n # figure out whether 'chr' needs to be added\n df_mis['Chromosome'] = df_mis['Chromosome'].astype(str).copy()\n num_chr = df_mis['Chromosome'].str.startswith('chr').sum()\n if num_chr == 0:\n df_mis['Chromosome'] = 'chr'+df_mis['Chromosome'].astype(str).copy()\n\n # rename columns to what is expected\n rename_dict = {'Hugo_Symbol': 'HUGO symbol',\n 'Transcript_ID': 'Reference Transcript',\n 'Start_Position': 'Position',\n 'Tumor_Sample_Barcode': 'Sample ID',\n 'Reference_Allele': 'Reference base(s)',\n 'Tumor_Seq_Allele2': 'Alternate base(s)'}\n df_mis = df_mis.rename(columns=rename_dict)\n\n return df_mis", "def parse_gra(filename, delimiter='\\t'):\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=delimiter)\n l = list(reader)\n taxids = l[0]\n rel_abunds = map(float, l[1])\n errors = map(float, l[2])\n data = [{'rel_abund': r, 'error': e} for r, e in zip(rel_abunds, errors)]\n return dict(zip(taxids, data))", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . 
ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def test_parse_hgts_jane4(self):\n with open(self.jane4_output_hgt_fp, 'r') as f:\n output = parse_hgts(f, 'jane4')\n self.assertEqual(int(output), 1)", "def read_est_obs_file(self, tsv_filename):\n d = {}\n for index, line in enumerate(open(tsv_filename, 'rb')):\n chunks = line.replace('\\n', '').split('\\t')\n if index == 0:\n n1_pos = chunks.index('n1')\n n2_pos = chunks.index('n2')\n n3_pos = chunks.index('n3')\n \n s1_pos = chunks.index('s1')\n s2_pos = chunks.index('s2')\n s3_pos = chunks.index('s3')\n s12_pos = chunks.index('s12')\n s13_pos = chunks.index('s13')\n s23_pos = chunks.index('s23')\n s123_pos = chunks.index('s123')\n\n est_pos = chunks.index('est')\n obs_pos = chunks.index('obs')\n ratio_pos = chunks.index('pair_trip_ratio')\n else:\n triangle = (int(chunks[s1_pos]), int(chunks[s2_pos]), int(chunks[s3_pos]), int(chunks[s12_pos]), int(chunks[s13_pos]), int(chunks[s23_pos]), int(chunks[s123_pos]))\n d[(chunks[n1_pos], chunks[n2_pos], chunks[n3_pos])] = (float(chunks[est_pos]), float(chunks[obs_pos]), float(chunks[ratio_pos]), triangle)\n\n return d", "def process_transcripts(transcript_file, dict_of_transcripts, fpkm_threshold):\n dictionary_of_unique_transcripts = {}\n list_transcripts = dict_of_transcripts[transcript_file]\n for transcript in list_transcripts:\n exon_ids = ''\n for exon in transcript.exons:\n exon_ids += str(exon.start) + '-' + str(exon.end) + '.'\n\n transcript_unique_id = transcript.chromosome + '-' + exon_ids\n\n if transcript_unique_id not in dictionary_of_unique_transcripts and transcript.fpkm == fpkm_threshold:\n dictionary_of_unique_transcripts[transcript_unique_id] = transcript\n\n print 'Number of transcripts over the threshold ', fpkm_threshold, ' and are distinct:'\n print len(dictionary_of_unique_transcripts)\n return {\"list_transcripts\": list_transcripts,\n \"dictionary_of_unique_transcripts\": dictionary_of_unique_transcripts}", "def read_changes_tsv(tsv_file):\r\n changes = {}\r\n with open(tsv_file, 'r') as info_file:\r\n for info in info_file:\r\n split_info = info.strip().split('/t')\r\n changes[split_info[0]] = split_info[1]\r\n return changes", "def ddf_parser():\n num_available, total = 0, 0\n indicator_twn_tuples = list() # format of a single tuple: (indicator_name, #twn rows, earliest available year)\n concept_metadata = dict() # {top_tag: second_layer_tag:\n\n # parse all ddf files provided by GapMinder and find how many of them with Taiwan statistics\n 
for f_path in glob.glob(os.path.join('statistics', '*datapoints*.csv')):\n total += 1\n df = pd.read_csv(f_path)\n if 'twn' in df.geo.unique():\n num_available += 1\n indicator = f_path.replace('statistics/ddf--datapoints--', '').replace('--by--geo--time.csv', '')\n # print('[Indicator]', indicator)\n print(f\"\\t{len(df[df.geo == 'twn'])} indicators including Taiwan statistics.\")\n\n # stat_name = df.columns[-1]\n # df_p = df.pivot(index='geo', columns='time')[stat_name]\n # df_p.insert(loc=0, column='indicator', value=stat_name)\n # df_p.to_csv(f'statistics_transformed/{stat_name}.csv', sep=';')\n\n indicators.append(indicator)\n\n\n # print(\"{:.1f}% datapoints have Taiwan statistics\".format(num_available / float(total) * 100))\n\n\n\n df_c = pd.read_csv(CONCEPT_CSV_PATH)\n df_t = pd.read_csv(TAG_CSV_PATH)\n df = pd.merge(df_c, df_t, how='left', left_on='tags', right_on='tag')\n for idr, num_rows, earliest_year in indicator_twn_tuples:\n ancestors = list()\n\n row_values = df[df['concept'] == idr].values[0]\n name_catalog, parent, ancestor = (row_values[i] for i in [9, 17, 18])\n if type(parent) is str:\n ancestors.append(parent)\n\n # get ancestors recursively\n while type(ancestor) is str:\n tag_row_values = df_t[df_t['tag'] == ancestor].values[0]\n ancestors.append(tag_row_values[1])\n ancestor = tag_row_values[2]\n\n # build concept structure\n ancestors.insert(0, name_catalog)\n print('/'.join(ancestors[::-1]))", "def parse_ivar_variants(tsv_filename, allow_missing=True):\n\n if file_is_missing(tsv_filename, allow_missing):\n return { 'variants': [] }\n\n variants = []\n\n # Skip first line\n for line in open(tsv_filename).readlines()[1:]:\n t = line.split('\\t')\n assert (len(t) == 19) or (len(t) == 20) # added POS_AA column\n\n if t[3] != '':\n variants.append(f\"{t[2]}{t[1]}{t[3]}\")\n\n return { 'variants': variants }", "def _read_tsv(file_path):\n translation_pairs = []\n with file_path.open() as f:\n # Note: the correct way to do this is with csv.DictReader, but some examples\n # have quote characters that confuse the csv parser. Since we know the\n # source never has its own tab or newline characters, basic Python string\n # manipulation is fine here, as long as the model doesn't predict tabs or\n # newlines.\n for line in f:\n line = line.strip()\n line = line.split('\\t')\n if len(line) != 2:\n raise ValueError(\n f'Line {line} could not be parsed. 
You may need to manually '\n 'replace tab or newline characters in the model output with '\n 'spaces.'\n )\n source, translation = line\n translation_pairs.append(\n evaluation.TranslationPair(source=source, translation=translation)\n )\n return translation_pairs", "def load_tle(filename : str):\n\n num_elements = 0\n first_line = False\n\n elements_by_id = {}\n\n with open(filename) as f:\n for i, line in enumerate(f.readlines()):\n toks = line.strip().split()\n if toks[1][-1] == 'U':\n if first_line:\n raise ValueError(\"Unexpected first line: {}\".format(i))\n num_elements += 1\n first_line = True\n # Skipping the first line data for now...\n else:\n if not first_line:\n raise ValueError(\"Missing element first line: {}\".format(i))\n first_line = False\n id = toks[1]\n inclination_deg = float(toks[2])\n raan_deg = float(toks[3])\n eccentricity = int(toks[4])\n argument_of_perigee_deg = float(toks[5])\n mean_anomaly_deg = float(toks[6])\n mean_motion = float(toks[7][:11])\n\n elements_by_id[id] = [inclination_deg, raan_deg, eccentricity, \n argument_of_perigee_deg, mean_anomaly_deg, mean_motion]\n print(\"Loaded {} elements.\".format(num_elements))\n return elements_by_id", "def process_output_tsv(output_tsv, threshold=None, print_dict=False):\n\n # Set default\n if threshold is None:\n threshold = 0.95\n L = [] # list to capture results\n\n try:\n with open(output_tsv, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in file_reader:\n L.append(row[0])\n except FileNotFoundError:\n print('no file test_output.tsv')\n\n D = list2dict(L)\n if print_dict:\n pprint('Concordance output: {}'.format(D))\n\n # Convert relevant values in dict to floats.\n vals = [D['type']['SNP']['precision'],\n D['type']['SNP']['sensitivity'],\n D['type']['INDEL']['precision'],\n D['type']['INDEL']['sensitivity']]\n\n if not vals:\n return 1 # this line is sys.exit(0) in WDL\n else:\n vals = [float(val) for val in vals]\n\n # The next line is needed as we encountered NaNs in the output\n # after we ran Concordance with two identical inputs for truth and test.\n # The following lines removes NaNs from the list.\n vals = [x for x in vals if not math.isnan(x)]\n\n # Test whether all values pass the threshold test:\n if all(val >= threshold for val in vals):\n message = 'The VCFs can be considered identical.'\n print(message)\n return 0 # this line is sys.exit(0) in the WDL\n\n else:\n message = 'The VCFs do not have enough overlap.'\n print(message)\n return 1 # this line is sys.exit(0) in WDL", "def parse_ensembl_transcripts(lines):\n header = []\n LOG.info(\"Parsing ensembl transcripts from file\")\n for index, line in enumerate(lines):\n # File allways start with a header line\n if index == 0:\n header = line.rstrip().split(\"\\t\")\n # After that each line represents a transcript\n else:\n yield parse_ensembl_line(line, header)", "def parse():\n file = open(INPUT, 'r')\n\n expect_eff = False\n expect_vout = False\n\n eff_dict = {}\n vout_dict = {}\n\n for line in file:\n if line.startswith('PCC'):\n id = line.strip()\n expect_eff = True\n elif expect_eff:\n if line.startswith('efficiency'):\n eff_str = line.strip().split(':')[1]\n # get rid of % symbol\n eff = int(eff_str.split('%')[0])\n eff_dict[id] = .01 * eff\n\n expect_vout = True\n\n expect_eff = False\n elif expect_vout:\n if line.startswith('output voltage'):\n vout_str = line.strip().split(':')[1]\n vout = int(vout_str)\n vout_dict[id] = vout\n\n expect_vout = False\n\n with open(EFF_OUTPUT, 'w') as 
f:\n json.dump(eff_dict, f)\n\n with open(VOUT_OUTPUT, 'w') as f:\n json.dump(vout_dict, f)\n\n # plot stats of eff and vout\n plot_hist(eff_dict.values(), 'Efficiency', 'eff', bins=50)\n plot_hist(vout_dict.values(), 'V_out', 'vout', bins=50)", "def parse_geno_file(folder,return_flag):\n\n perc_alt = defaultdict(list)\n perc_ref = defaultdict(list)\n abs_alt = defaultdict(list)\n abs_ref = defaultdict(list)\n\n perc_alt_inv = defaultdict(dict)\n perc_ref_inv = defaultdict(dict)\n abs_alt_inv = defaultdict(dict)\n abs_ref_inv = defaultdict(dict)\n\n for geno_file in glob.glob(folder+'*_test_summary.tsv'):\n strain = geno_file.split('/')[-1].split('_')[0]\n #print strain\n prev_coordinate = \"0\"\n count = 0\n alt_allele = {}\n amb_allele = {}\n ref_allele = {}\n flag = 0 \n\n TEMP_HANDLE = open(geno_file,'r')\n for line in TEMP_HANDLE:\n line = line.rstrip('\\n')\n\n if(line[0]!='v'): ## Skip the header\n coordinate = line.split('\\t')[0].split('::')[-1]\n if(coordinate != prev_coordinate):\n #prev_coordinate = coordinate\n count = count + 1\n if(count == 1):\n if(line.split('\\t')[-3]!='alt'): ## No reads supporting the alternate allele\n flag = 1 \n alt_allele[coordinate] = 0\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n #print line\n else:\n alt_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 2):\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 3):\n if(line.split('\\t')[-3]!='ref'): ## No reads supporting the reference allele (all are ambiguous)\n ref_allele[coordinate] = 0\n else:\n ref_allele[coordinate] = int(line.split('\\t')[-1])\n prev_coordinate = coordinate\n count = 0\n if(flag == 1): ## The case where there are no alternate allele reads, counter is incremented to account for changed numbering\n count = count + 1 \n flag = 0 \n\n \n for key in alt_allele:\n if(alt_allele[key]+ref_allele[key]!= 0): ## Check to see if the denominator is not zero\n abs_alt[strain].append(float(alt_allele[key]))\n abs_ref[strain].append(float(ref_allele[key]))\n perc_alt[strain].append(float(alt_allele[key])/(alt_allele[key]+ref_allele[key]))\n perc_ref[strain].append(float(ref_allele[key])/(alt_allele[key]+ref_allele[key]))\n\n\n abs_alt_inv[strain][key] = float(alt_allele[key])\n abs_ref_inv[strain][key] = float(ref_allele[key])\n perc_alt_inv[strain][key] = float(alt_allele[key])/(alt_allele[key]+ref_allele[key])\n perc_ref_inv[strain][key] = float(ref_allele[key])/(alt_allele[key]+ref_allele[key])\n \n \n\n ## Keep only the common inversions, i.e. 
those between MC and the rest \n all_inversions = []\n common_inversions = []\n abs_alt_set = defaultdict(list)\n perc_alt_set = defaultdict(list)\n\n abs_alt_inv_set = defaultdict(dict)\n perc_alt_inv_set = defaultdict(dict)\n abs_ref_inv_set = defaultdict(dict)\n perc_ref_inv_set = defaultdict(dict)\n\n Rock = ['AC', 'CL','CM','CN','TI','PN','MC']\n Sand = ['MZ','DC','LF','MP','MS','CV']\n\n\n sand_inversions = []\n rock_inversions = []\n\n for strain in abs_alt_inv.keys():\n for inversion in abs_alt_inv[strain].keys():\n if(strain in Rock):\n rock_inversions.append(inversion)\n else:\n sand_inversions.append(inversion)\n all_inversions.append(inversion)\n \n \n common_inversions_sand = Counter(sand_inversions)\n common_inversions_rock = Counter(rock_inversions)\n #count_sand = 0\n common_inversions = Counter(all_inversions)\n return_inversions = []\n \n \n #print common_inversions\n for inversion in common_inversions.keys():\n if(common_inversions[inversion]==13):\n return_inversions.append(inversion)\n for strain in abs_alt_inv.keys():\n abs_alt_set[strain].append(abs_alt_inv[strain][inversion])\n perc_alt_set[strain].append(perc_alt_inv[strain][inversion])\n\n abs_alt_inv_set[strain][inversion] = abs_alt_inv[strain][inversion]\n perc_alt_inv_set[strain][inversion] = perc_alt_inv[strain][inversion]\n abs_ref_inv_set[strain][inversion] = abs_ref_inv[strain][inversion]\n perc_ref_inv_set[strain][inversion] = perc_ref_inv[strain][inversion]\n\n\n for inversion in abs_alt_inv_set['MC']:\n alternate_allele_sum_rock = 0\n reference_allele_sum_rock = 0\n alternate_allele_sum_sand = 0\n reference_allele_sum_sand = 0 \n for strain in Rock:\n alternate_allele_sum_rock = alternate_allele_sum_rock + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_rock = reference_allele_sum_rock + abs_ref_inv_set[strain][inversion]\n\n for strain in Sand:\n alternate_allele_sum_sand = alternate_allele_sum_sand + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_sand = reference_allele_sum_sand + abs_ref_inv_set[strain][inversion]\n\n abs_alt_set['Rock'].append(alternate_allele_sum_rock)\n perc_alt_set['Rock'].append(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock)))\n \n abs_alt_set['Sand'].append(alternate_allele_sum_sand)\n perc_alt_set['Sand'].append(float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand)))\n \n with open('log_file.txt','a') as LOG_FILE:\n if(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock))>float(sys.argv[2]) or float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand))>float(sys.argv[2])):\n print >> LOG_FILE,inversion \n \n\n print \"Sand : \"+str(count_sand)\n\n if return_flag == True:\n #print len([abs_alt_inv_set,abs_ref_inv_set,perc_alt_inv_set,perc_ref_inv_set])\n return perc_alt_inv_set\n else:\n return [abs_alt_set,perc_alt_set]", "def getRelevantIDsAndProvedIDs(tsvFile): \r\n start = datetime.now()\r\n relevantIDs = []\r\n provedIDs = []\r\n with open(tsvFile, \"rb\") as tsvReader:\r\n itemReader = DictReader(tsvReader, delimiter='\\t', quotechar='\"')\r\n for i, item in enumerate(itemReader):\r\n item = {featureName:featureValue.decode('utf-8') \\\r\n for featureName,featureValue in item.items() \\\r\n if featureValue is not None}\r\n \r\n if item[\"is_blocked\"] == \"1\":\r\n relevantIDs.append( int(item[\"itemid\"]) )\r\n if item[\"is_proved\"] == \"1\":\r\n provedIDs.append( int(item[\"itemid\"]) ) \r\n\r\n if 
(i+1)%1000000 == 0:\r\n print(( \"%s\\t%s\"%((i+1),str(datetime.now() - start)) ))\r\n\r\n return relevantIDs, provedIDs", "def _parse_tx_infos(self, gtf_path):\n if os.path.exists('_tx_cache.bin'):\n with open('_tx_cache.bin', 'rb') as f:\n return pickle.load(f)\n result = []\n with gzip.open(gtf_path, 'rt') as f:\n for i, line in enumerate(f):\n if i % 1000 == 0:\n print('processed {}'.format(i), file=sys.stderr)\n if line.startswith('#'):\n continue\n if line.split('\\t', 3)[2] != 'transcript':\n continue\n record = GTFFeature.parse(line)\n if record.feature != 'transcript':\n continue\n result.append(\n TranscriptInfo(record.attrs['gene_id'],\n record.attrs['transcript_id'],\n record.attrs['transcript_type'],\n record.seqname,\n record.start,\n record.end))\n with open('_tx_cache.bin', 'wb') as g:\n pickle.dump(result, g)\n print(len(result), file=sys.stderr)\n return result" ]
[ "0.576154", "0.5523585", "0.54649943", "0.54580283", "0.5412821", "0.5332087", "0.52648944", "0.52553064", "0.52298635", "0.5176947", "0.5138724", "0.5133539", "0.5120489", "0.5109864", "0.50941163", "0.5062895", "0.5057512", "0.50502867", "0.5025302", "0.50223887", "0.5010852", "0.49921796", "0.49793795", "0.49685413", "0.4965468", "0.49393257", "0.49334747", "0.491892", "0.49184442", "0.4907762" ]
0.62047464
0
given a list of cellNames and a list of transcript > count dictionaries, write out a matrix with transcript > counts in columns
def outputBigMatrix(cellNames, results, outFname, isGene=False): logging.info("Writing data to file %s" % outFname) ofh = open(outFname, "w") # write header if isGene: ofh.write("#gene\t%s\n" % "\t".join(cellNames)) else: ofh.write("#transcript\t%s\n" % "\t".join(cellNames)) # create a sorted list of all transcript names logging.info("Getting transcript IDs") allTrans = set() for res in results: allTrans.update(res) allTrans = list(allTrans) allTrans.sort() # write out matrix logging.info("Iterating over transcript IDs and writing to tab file") for trans in allTrans: ofh.write("%s\t" % trans) row = [] for countDict in results: row.append(str(countDict.get(trans, 0))) ofh.write("\t".join(row)) ofh.write("\n") ofh.close() # also output as a binary file for now # it's a lot easier and faster to parse, at least for python scripts # can be read from python with a single line: # matrix = marshal.load(open("data.tab.marshal")) # matrix is then a nested hash: cellName -> transcript -> count binPath = outFname+".marshal" logging.info("Writing %s" % binPath) allData = {} for name, transDict in zip(cellNames, results): allData[name] = transDict marshal.dump(allData, open(binPath, "wb"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_dir, \"count_matrix/\")\n if not os.path.exists(sub_dir):\n os.mkdir(sub_dir)\n\n word_tag_matrix = get_tag_word_matrix(filename)\n with open(sub_dir + word_tag_output, \"w\") as f:\n json.dump(word_tag_matrix, f)\n\n unigram_matrix = get_tag_n_gram(n=1, filename=filename)\n with open(sub_dir + unigram_matrix_name, \"w\") as f:\n json.dump(unigram_matrix, f)\n\n bigram_matrix = get_tag_n_gram(n=2, filename=filename)\n with open(sub_dir + bigram_matrix_name, \"w\") as f:\n json.dump(bigram_matrix, f)\n\n trigram_matrix = get_tag_n_gram(n=3, filename=filename)\n with open(sub_dir + trigram_matrix_name, \"w\") as f:\n json.dump(trigram_matrix, f)", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")", "def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)", "def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c", "def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = number\n num_table[number] = name\n\n return name_table, num_table", "def frequencyTable (dnaList):\n n = max([len(dna) for dna in dnaList])\n frequency_matrix = {\n 'A': [0]*n,\n 'C': [0]*n,\n 'G': [0]*n,\n 'T': [0]*n,\n }\n for dna in dnaList:\n for index , base in enumerate(dna):\n frequency_matrix[base][index] += 1\n\n return frequency_matrix;", "def getReadCounts( counts, htList ):\n for ht in htList:\n htseqName = ht\n \n # calculate the total number of aligned reads \n totalReads = 0\n alignedReads = 0\n \n # sum read counts\n with open(htseqName,'r') as htseq:\n for x in htseq:\n x = x.strip() # get rid of that pesky newline\n row = x.split('\\t')\n totalReads += float(row[1])\n if x.startswith('__'):\n continue\n else:\n alignedReads += float(row[1])\n \n percentAligned = (alignedReads/totalReads) * 100\n counts[ht].append(totalReads)\n counts[ht].append(alignedReads)\n counts[ht].append(percentAligned)", "def calculate_2mer_freq(counts_file):\n count_matrix = dict()\n\n with open(counts_file, \"r\", newline=\"\") as handle:\n records = csv.reader(handle, delimiter=\"\\t\")\n next(records)\n for row in records:\n nuc1 = str(row[0])\n nuc2 = str(row[1])\n count = int(row[2])\n\n left = \"x{}\".format(nuc2)\n right = \"{}x\".format(nuc1)\n\n count_matrix.setdefault(nuc1, dict())[left] = count\n count_matrix.setdefault(nuc2, dict())[right] = count\n\n lines = \"\"\n header = \"\"\n 
for ref, d in count_matrix.items():\n lines += ref\n for other in sorted(d.keys()):\n lines += \"\\t\" + str(d[other])\n lines += \"\\n\"\n header = \"x\\t{}\\n\".format(\"\\t\".join(sorted(d.keys())))\n print(header + lines)", "def generate_cell_tsv():\n\n h5_in_path = INPUT_FILE_PATH.replace(\".bed.gz\", \".h5\") \\\n .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + \"/centered/\")\n with h5.File(h5_in_path, 'r') as h5_in:\n cell_ids = list(h5_in['cells'])\n utr_lengths = list(h5_in['utrs'])\n cluster_lengths = list(h5_in['cluster_utrs'])\n trajectory_lengths = list(h5_in['traj_utrs'])\n subtrajectory_lengths = list(h5_in['subtraj_utrs'])\n age_lengths = list(h5_in['age_utrs'])\n\n with open(REFERENCE_PATH + \"names_by_id.pkl\", 'rb') as names_in:\n cell_names = pkl.load(names_in)[0]\n tsv_out_path = INPUT_FILE_PATH.replace(\".bed.gz\", \".tsv\") \\\n .replace(OVERLAP_PATH, REFERENCE_PATH + PAS_DATASET + \"/tsv/\")\n with open(tsv_out_path, 'wt') as cell_data_out:\n cell_count = 0\n cell_utrs = []\n cell_utrs_cluster = []\n cell_utrs_trajectory = []\n cell_utrs_subtrajectory = []\n cell_utrs_age = []\n for idx, cell_id in enumerate(cell_ids):\n cell_count += 1\n cell_utr = utr_lengths[idx]\n cell_utr_cluster = cluster_lengths[idx]\n cell_utr_trajectory = trajectory_lengths[idx]\n cell_utr_subtrajectory = subtrajectory_lengths[idx]\n cell_utr_age = age_lengths[idx]\n cell_utrs.append(cell_utr)\n cell_utrs_cluster.append(cell_utr_cluster)\n cell_utrs_trajectory.append(cell_utr_trajectory)\n cell_utrs_subtrajectory.append(cell_utr_subtrajectory)\n cell_utrs_age.append(cell_utr_age)\n # Executes on the last cell group of the entire list or when a new cell group is on the next line.\n if idx + 1 == len(cell_ids) or cell_ids[idx + 1] != cell_id:\n cell_utr_mean = str(np.mean(cell_utrs))\n # Sets approved gene UTR means to 'NA' if cell has no reads from approved genes.\n # Otherwise this will set the approved gene UTR to the mean of only approved gene statistics.\n # Sets approved cell UTR means to 'NA' if the cells aren't in approved groups.\n cell_utr_cluster_mean = str(np.mean(cell_utrs_cluster))\n cell_utr_trajectory_mean = str(np.mean(cell_utrs_trajectory))\n cell_utr_subtrajectory_mean = str(np.mean(cell_utrs_subtrajectory))\n cell_utr_age_mean = str(np.mean(cell_utrs_age))\n cell_name = cell_names[cell_id]\n cell_data = CELL_DATA_DICT[cell_name]\n cell_age = cell_data[0]\n cell_subcluster = cell_data[2] + \".\" + cell_data[5]\n cell_data_used = [cell_data[2], cell_data[3], cell_data[4], cell_subcluster, cell_data[6], cell_data[7],\n cell_data[8], cell_data[9], cell_data[10], cell_data[11], cell_data[16],\n cell_data[13], cell_data[14], cell_data[15], cell_age, cell_utr_mean,\n cell_utr_cluster_mean, cell_utr_trajectory_mean, cell_utr_subtrajectory_mean,\n cell_utr_age_mean, cell_data[20], cell_count]\n cell_data_str = '\\t'.join(cell_data_used) + '\\n'\n cell_data_out.write(cell_data_str)\n # Resets cell data for next line.\n cell_utrs = []\n cell_utrs_cluster = []\n cell_utrs_trajectory = []\n cell_utrs_subtrajectory = []\n cell_utrs_age = []\n cell_count = 0\n\n print(\"Cell tsv generated!\")", "def tabler(subcorpus_names, list_of_dicts, num_rows):\n import pandas as pd\n cols = []\n for subcorp, data in zip(subcorpus_names, list_of_dicts):\n col = pd.Series([w for w, v in data.most_common(num_rows)], name = subcorp)\n cols.append(col)\n word_table = pd.concat(cols, axis = 1)\n return word_table", "def call_cells(df_reads):\n cols = [WELL, TILE, CELL]\n s = (df_reads\n 
.drop_duplicates([WELL, TILE, BLOB])\n .groupby(cols)[BARCODE]\n .value_counts()\n .rename('count')\n .sort_values(ascending=False)\n .reset_index()\n .groupby(cols)\n )\n\n return (df_reads\n .join(s.nth(0)[BARCODE].rename(BARCODE_0), on=cols)\n .join(s.nth(0)['count'].rename(BARCODE_COUNT_0).fillna(0), on=cols)\n .join(s.nth(1)[BARCODE].rename(BARCODE_1), on=cols)\n .join(s.nth(1)['count'].rename(BARCODE_COUNT_1).fillna(0), on=cols)\n .join(s['count'].sum() .rename(BARCODE_COUNT), on=cols)\n .drop_duplicates(cols)\n [[WELL, TILE, CELL, BARCODE_0, BARCODE_COUNT_0, BARCODE_1, BARCODE_COUNT_1]]\n )", "def get_tag_word_matrix(filename):\n\n count_matrix = {}\n with open(filename) as f:\n for l in f:\n line = l.strip()\n if not line:\n continue\n\n # Reversing the position of word and tag\n key = \" \".join(line.split()[::-1])\n if key not in count_matrix.keys():\n count_matrix[key] = 1\n else:\n count_matrix[key] += 1\n\n return count_matrix", "def write_matrix_to_h5(output_file: str,\n gene_names: np.ndarray,\n barcodes: np.ndarray,\n inferred_count_matrix: sp.csc.csc_matrix,\n cell_barcode_inds: Union[np.ndarray, None] = None,\n ambient_expression: Union[np.ndarray, None] = None,\n rho: Union[np.ndarray, None] = None,\n phi: Union[np.ndarray, None] = None,\n z: Union[np.ndarray, None] = None,\n d: Union[np.ndarray, None] = None,\n p: Union[np.ndarray, None] = None,\n loss: Union[Dict, None] = None) -> bool:\n\n assert isinstance(inferred_count_matrix,\n sp.csc_matrix), \"The count matrix must be csc_matrix \" \\\n \"format in order to write to HDF5.\"\n\n assert gene_names.size == inferred_count_matrix.shape[1], \\\n \"The number of gene names must match the number of columns in the count\" \\\n \"matrix.\"\n\n assert barcodes.size == inferred_count_matrix.shape[0], \\\n \"The number of barcodes must match the number of rows in the count\" \\\n \"matrix.\"\n\n # This reverses the role of rows and columns, to match CellRanger format.\n inferred_count_matrix = inferred_count_matrix.transpose().tocsc()\n\n # Write to output file.\n try:\n with tables.open_file(output_file, \"w\",\n title=\"Background-subtracted UMI counts\") as f:\n\n # Create the group where data will be stored.\n group = f.create_group(\"/\", \"background_removed\",\n \"Counts after background correction\")\n\n # Create arrays within that group for barcodes and gene_names.\n f.create_array(group, \"gene_names\", gene_names)\n f.create_array(group, \"genes\", np.arange(gene_names.size)) # For compatibility, added post PR\n f.create_array(group, \"barcodes\", barcodes)\n\n # Create arrays to store the count data.\n f.create_array(group, \"data\", inferred_count_matrix.data)\n f.create_array(group, \"indices\", inferred_count_matrix.indices)\n f.create_array(group, \"indptr\", inferred_count_matrix.indptr)\n f.create_array(group, \"shape\", inferred_count_matrix.shape)\n\n # Store background gene expression, barcode_inds, z, d, and p.\n if cell_barcode_inds is not None:\n f.create_array(group, \"barcode_indices_for_latents\",\n cell_barcode_inds)\n if ambient_expression is not None:\n f.create_array(group, \"ambient_expression\", ambient_expression)\n if z is not None:\n f.create_array(group, \"latent_gene_encoding\", z)\n if d is not None:\n f.create_array(group, \"latent_scale\", d)\n if p is not None:\n f.create_array(group, \"latent_cell_probability\", p)\n if rho is not None:\n f.create_array(group, \"contamination_fraction_params\", rho)\n if phi is not None:\n f.create_array(group, \"overdispersion_params\", phi)\n if 
loss is not None:\n f.create_array(group, \"training_elbo_per_epoch\",\n np.array(loss['train']['elbo']))\n\n logging.info(f\"Succeeded in writing output to file {output_file}\")\n\n return True\n\n except Exception:\n logging.warning(f\"Encountered an error writing output to file \"\n f\"{output_file}. \"\n \"Output may be incomplete.\")\n\n return False", "def write_counts(outfile, counts, search):\n\t\n\tcombinations = get_all_counts(counts)\n\t\n\tcounts_df = pd.DataFrame(combinations, columns = [set['name'] for set in search] + ['count'])\n\t\n\tcounts_df.to_csv(outfile, index=False, sep = '\\t')", "def partitioner(mappings):\n\t\n\ttoken_counts = defaultdict(list)\n\t\n\tfor sublist in mappings:\n\t\tfor t, c in sublist:\n\t\t\ttoken_counts[t].append(c)\n\t\t\t\n\treturn token_counts", "def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()", "def process_metadata(metadata, cell_names):\n cols = [c for c in metadata.columns if 'ontology term' not in c.lower()]\n metadata = metadata[cols] # Drop columns with ontology terms\n \n metadata = metadata.rename(columns=lambda x: re.sub(r'.+\\[(.+)\\]',r'\\1',x)) # Rename columns\n \n metadata = metadata.loc[:,~metadata.columns.duplicated()] # Drop duplicated columns\n\n # Delete cells that are not in the matrix\n metadata = pd.merge(\n cell_names,\n metadata,\n how=\"inner\",\n on='Assay'\n )\n \n return metadata", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def exp_calculator_with_count(count_table_file):\n count_table = 
pd.read_table(count_table_file, index_col=0)\n columns = count_table.columns\n\n gene_len = count_table[columns[0]]\n rpkm_dict = dict()\n tpm_dict = dict()\n for sample in columns[1:]:\n # Divide the read counts by the length of each gene in kilobases.\n # This gives you reads per kilobase (RPK)\n rpk = count_table[sample]/gene_len\n # get rpkm/fpkm\n total_counts = sum(count_table[sample])/1000\n \"\"\"\n rpkm = (count_table[sample]/gene_len)/(sum(count_table[sample])/1000)*1000000\n \"\"\"\n rpkm = rpk/total_counts*1000000\n # get tpm\n norm_gene_len_total_counts = sum(rpk)\n tpm = rpk/norm_gene_len_total_counts*1000000\n \"\"\"\n tpm = (count_table[sample]/gene_len)/sum(count_table[sample]/gene_len)*1000000\n \"\"\"\n # save\n rpkm_dict[sample] = rpkm\n tpm_dict[sample] = tpm\n # save results\n df_rpkm = pd.DataFrame(rpkm_dict, index=count_table.index)\n df_tpm = pd.DataFrame(tpm_dict, index=count_table.index)\n df_rpkm.to_csv(count_table_file+'.fpkm.xls', sep='\\t')\n df_tpm.to_csv(count_table_file+'.tpm.xls', sep='\\t')\n #\n return rpkm_dict, tpm_dict", "def count_nucleotides(mat):\n\n final_counts = np.ones((4, mat.shape[1]))\n\n for i in range(len(mat[0, :])):\n cur_nucleotides = np.ones((4, 1))\n a_count = 0\n c_count = 0\n g_count = 0\n t_count = 0\n for j in range(len(mat[:, 0])):\n if mat[j, i] == 'A':\n a_count = a_count + 1\n elif mat[j, i] == 'C':\n c_count = c_count + 1\n elif mat[j, i] == 'G':\n g_count = g_count + 1\n elif mat[j, i] == 'T':\n t_count = t_count + 1\n cur_nucleotides = np.array([a_count, c_count, g_count, t_count])\n final_counts[:, i] = cur_nucleotides\n return final_counts", "def _od_offsets_matrix(file_names, offsets_dict, test_files):\n string = ''\n string += _SEPARATOR.join(['name'] + file_names) + '\\n'\n string += _SEPARATOR.join(['repeats'] + ['%d' % test_files[name]['repeats'] for name in file_names]) + '\\n'\n string += _SEPARATOR.join(['size'] + ['%d' % len(test_files[name]['data']) for name in file_names]) + '\\n'\n for subs in _od_substrings(offsets_dict):\n string += _SEPARATOR.join([H(subs)] + ['%d' % len(offsets_dict[name][subs]) for name in file_names]) + '\\n'\n return string", "def computeWordMatrix( Docs, Keywords ) :\n\n w2vec_count = CountVectorizer( ngram_range=(1, 4), vocabulary=Keywords )\n X_Count = w2vec_count.fit_transform( Docs )\n\n return X_Count", "def writeOutFileBarcodeCounts(barcode_dict_summary, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n for barcode in barcode_dict_summary:\n out_file.write(barcode)\n out_file.write(\"\\t\" + \"\\t\".join(map(str,barcode_dict_summary[barcode])))\n out_file.write(\"\\n\")", "def buildWeightMatrix(seqsToScore):\n\t# initialize with pseudocounts at each position\n\twmat = []\n\tfor i in range(0, motifWidth):\n\t\twmat.append({\"A\": 1, \"C\": 1, \"G\": 1, \"T\": 1})\n\t# loop through all motifs, add 1 to appropriate position and nt in wmat\n\tfor s in seqsToScore:\n\t\tfor j in range(0, motifWidth):\n\t\t\twmat[j][s.getMotif()[j]]+=1\t\t\n\t# normalize counts\n\tfor i in range(0, motifWidth):\n\t\ttotCounts = float(sum(wmat[i].values()))\n\t\tfor nt in wmat[i]:\n\t\t\twmat[i][nt] = wmat[i][nt]/totCounts\n\t\t\t\t\n\treturn wmat", "def build_matrix(df,idx):\n nrows = df.shape[0]\n ncols = len(idx)\n \n nnz = 0\n for index, row in df.iterrows():\n rowValue = row['Tags'].strip(\"'<>() \").replace('\\'', '\\\"')\n rowValueToList = json.loads(rowValue)\n\n tagsList = []\n\n for tags in rowValueToList:\n tags = tags.strip()\n tagsList += tags\n nnz += len(set(tagsList))\n 
\n # set up memory\n ind = np.zeros(nnz, dtype=np.int)\n val = np.zeros(nnz, dtype=np.double)\n ptr = np.zeros(nrows+1, dtype=np.int)\n i = 0 # document ID / row counter\n n = 0 # non-zero counter\n # transfer values\n for index, row in df.iterrows():\n rowValue = row['Tags'].strip(\"'<>() \").replace('\\'', '\\\"')\n rowValueToList = json.loads(rowValue)\n\n tagsList = []\n\n for tags in rowValueToList:\n tags = tags.strip()\n\n for tag in tags.split():\n tag = porter.stem(tag) #Stem the tag\n if tag in idx: #Remove the stopwords\n if len(tag) > 2: \n tagsList.append(tag)\n\n cnt = Counter(tagsList)\n keys = list(k for k,_ in cnt.most_common())\n l = len(keys)\n for j,k in enumerate(keys):\n if(k in idx):\n ind[j+n] = idx[k]\n val[j+n] = cnt[k]\n # else:\n # print(\"Vocabulary Not Found\",k)\n ptr[i+1] = ptr[i] + l\n n += l\n i += 1\n \n mat = csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.double)\n mat.sort_indices()\n return mat", "def extract_number_target_genes(d, g, miranda_fnm):\n\toutput_nm = \"%s_counts.txt\"%(miranda_fnm[:-4])\n\toutput = open(output_nm, 'w')\n\toutput.write(\"miRNA\\ttotal_target_genes\\ttarget_genes_down\\ttarget_genes\\n\")\n\tfor key in d:\n\t\tif len(d[key]) > 0:\n\t\t\t#print key, len(d[key])\n\t\t\toutput.write(\"%s\\t%s\\t%s\\t%s\\n\"%(key, len(d[key]), \n\t\t\t\tlen(g[key]), str(g[key]).strip('[]')))\n\n\toutput.close()", "def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def count_transitions_and_emissions(K, D, x, z): \n trans_matrix = [ [ 0 for i in range(K) ] for j in range(K) ]\n emi_matrix = [ [ 0 for i in range(D) ] for j in range(K) ] \n \n print(\"Started counting transitions\")\n for i in range(len(z)-1):\n trans_matrix[z[i]][z[i+1]] += 1 \n\n print(\"Started counting emissions\")\n size_x = len(x)\n size_z = len(z)\n for i,_ in enumerate(zip(x,z)):\n emi_matrix[z[i]][x[i]] += 1\n \n return trans_matrix,emi_matrix", "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n 
return CellRangerCounts(matrix, gene_ids, barcodes)" ]
[ "0.6228252", "0.5873816", "0.58383214", "0.57617635", "0.54583514", "0.54458195", "0.54176295", "0.54138976", "0.53219235", "0.5272027", "0.5245677", "0.5184741", "0.5132777", "0.5129691", "0.51268375", "0.5118446", "0.511654", "0.5109872", "0.510377", "0.50655836", "0.5040904", "0.5037385", "0.50343615", "0.50272906", "0.50201637", "0.5001002", "0.49981156", "0.49773377", "0.49551094", "0.4947941" ]
0.63610333
0
given a list of dict transcript > tpm, and a map transcript > gene, map all transcripts to genes and return a list of gene > sum of tpms If we have no gene ID, drop the transcript entirely.
def sumTransToGene(transDictList, transFile): transToGene = parseDict(transFile, stripDot=True) logging.info("Mapping %d transcript IDs to gene IDs" % len(transToGene)) newRes = [] noMapTransIds = set() for transCounts in transDictList: geneCounts = defaultdict(float) for transId, count in transCounts.iteritems(): transId = transId.split(".")[0] geneId = transToGene.get(transId) if geneId is None: noMapTransIds.add(transId) else: geneCounts[geneId]+=count newRes.append(dict(geneCounts)) logging.info("no gene ID found for %d transcript IDs. These are probably scaffolds/patches/etc. Example IDs: %s" % (len(noMapTransIds), list(noMapTransIds)[:5])) return newRes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate_GO_TPM_data(self, TPM_dict, *filtered_GO_dicts):\n\n dictionary = {}\n for i in filtered_GO_dicts:\n tmp_dict = {}\n for k, v in i.iteritems():\n tmp_dict[k] = map(\n lambda x: x + ':{0}'.format(TPM_dict[k] / len(v)), v\n )\n if i == go_cc:\n dictionary['go_tpm_cc'] = tmp_dict\n elif i == go_bp:\n dictionary['go_tpm_bp'] = tmp_dict\n else:\n dictionary['go_tpm_mf'] = tmp_dict\n return(\n dictionary['go_tpm_cc'], dictionary['go_tpm_bp'],\n dictionary['go_tpm_mf']\n )", "def stampaGTFEsIn(dictTranscript, dictGenes, dictInput, fileOut, geneNames):\n\n\tstringaGTF \t\t\t\t= \t\t'%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t# Formato della riga da stampare nel file\n\texonF\t\t\t\t\t= \t\t'exon_number \"%d\"'\t\t\t\t\t\t\t# Formato della stringa di tipo exon (True)\n\tintronF\t\t\t\t\t=\t\t'intron_number \"%d\"'\t\t\t\t\t\t# Formato della stringa di tipo intron (False)\n\t\n\t# Indici all'interno del dizionario dei transcript\n\t#\n\tidx_transcriptName = 0\n\tidx_geneID = 1\n\t\n\t# Indici all'interno del dizionari dei geni\n\t#\n\tidx_geneName = 0\n\tidx_cromosoma = 1\n\n\t# Indici all'interno del dizionario degli introni e degli esoni\n\t#\n\tidx_start = 0\n\tidx_end = 1\n\tidx_tipo = 2\t\n\n\t# Tipo di regioni\n\tesone = True\n\tintrone = False\n\n\n\t# Apertura e preparazione dei file da scrivere (un file gtf con\n\t# esoni/introni per ogni gene e uno totale con tutte le regioni per tutti\n\t# i geni passati dall'utente\n\t#\t\n\tfiles = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\n\tfor gene in geneNames:\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tcod = geneNames[gene]\n\t\t# Avendo tanti geni, ad ogni nome di gene si associa la relativa\n\t\t# cartella del gene corrente tra quelli passati dall'utente\n\t\t#\n\t\tif not path.exists(cartella % cod):\n\t\t\tsystem('mkdir ' + cartella % cod)\n\t\tfiles[gene] = open(str(cartella % cod + fileOut), 'w')\n\t\t\n\t# File contenente le regioni esoniche/introniche di tutti i geni\n\t# passati dall'utente (serve per mappare le reads)\n\t#\n\tfileGtf = open(str(fileOut), 'w')\t\t\t\t\t\t\t \n\n\tfor transcriptID in dictInput:\n\t\tgeneID \t\t\t= dictTranscript[transcriptID][idx_geneID]\n\t\tcromosoma\t\t= dictGenes[geneID][idx_cromosoma]\n\t\tgeneName\t\t= dictGenes[geneID][idx_geneName]\n\t\ttranscriptName \t= dictTranscript[transcriptID][idx_transcriptName]\n\t\t# Inizializzazione del numero di esone/introne da stampare nel file\n\t\t#\n\t\tnrEs \t\t\t= 1\n\t\tnrIn \t\t\t= 1\n\t\t\n\t\tfor i in range(0, len(dictInput[transcriptID][idx_start])):\n\t\t\tstart\t\t= dictInput[transcriptID][idx_start][i]\n\t\t\tend\t\t\t= dictInput[transcriptID][idx_end][i]\n\t\t\ttipo\t\t= dictInput[transcriptID][idx_tipo][i]\n\n\t\t\tif tipo == esone:\n\t\t\t\tregione = exonF % (nrEs)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato exon\n\t\t\t\tnrEs += 1\n\t\t\telse:\n\t\t\t\tregione = intronF % (nrIn)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato intron\n\t\t\t\tnrIn += 1\n\t\t\t\t\n\t\t\tstrGtf = stringaGTF % (cromosoma, str(start), str(end), regione,\t\t\n\t\t\t\t\t\t\t\t geneName, transcriptName)\t\t\t\t\t# Creazione della riga del file\n\t\t\t\n\t\t\tif geneName in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene presenta regioni introniche..\n\t\t\t\tfiles[geneName].write(strGtf)\t\t\t\t\t\t\t\t\t# ..si stampa il file gtf relativo alle proprie..\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..regioni introniche nella propria cartella\n\n\t\t\tfileGtf.write(strGtf)\n\t\t\t\t\n\tif geneNames:\n\t\tfor gene in files:\n\t\t\tfiles[gene].close()\n\n\tfileGtf.close()", 
"def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict", "def process_transcripts(transcript_file, dict_of_transcripts, fpkm_threshold):\n dictionary_of_unique_transcripts = {}\n list_transcripts = dict_of_transcripts[transcript_file]\n for transcript in list_transcripts:\n exon_ids = ''\n for exon in transcript.exons:\n exon_ids += str(exon.start) + '-' + str(exon.end) + '.'\n\n transcript_unique_id = transcript.chromosome + '-' + exon_ids\n\n if transcript_unique_id not in dictionary_of_unique_transcripts and transcript.fpkm == fpkm_threshold:\n dictionary_of_unique_transcripts[transcript_unique_id] = transcript\n\n print 'Number of transcripts over the threshold ', fpkm_threshold, ' and are distinct:'\n print len(dictionary_of_unique_transcripts)\n return {\"list_transcripts\": list_transcripts,\n \"dictionary_of_unique_transcripts\": dictionary_of_unique_transcripts}", "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))", "def get_gene_transcript_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col).reset_index()\n r = {}\n for gene_id, s in df.groupby('GeneId'):\n r[gene_id] = s.TranscriptId.tolist()\n return r", "def filter_genes(genes, output=sys.stdout):\n for _, gene_data in genes.items():\n new_gene = OrderedDict({'data': gene_data['data'], 'transcripts': OrderedDict()})\n\n sorted_transcripts = sorted(gene_data['transcripts'].values(),\n key=lambda x: x['data']['stop'] - x['data']['start'],\n reverse=True)\n\n longest_transcript = sorted_transcripts[0]\n\n new_gene['transcripts'][longest_transcript['data']['attributes']['ID']] = longest_transcript\n print(format_gene(new_gene), file=output)", "def preprocess_gene(gene_id,data_dict,t2g_mapping,out_paths,locks):\n \n # features = ['read_id','transcript_id','transcriptomic_position','reference_kmer','norm_mean','start_idx','end_idx'] # columns in the eventalign file per read.\n\n events = []\n condition_labels = []\n run_labels = []\n read_ids = []\n genomic_coordinates = []\n \n # Concatenate\n# if len(data_dict) == 0:\n# return\n\n\n for read_index,events_per_read in data_dict.items():\n# if len(events_per_read) > 0:\n # ===== transcript to gene coordinates ===== # TODO: to use gtf.\n# tx_ids = [tx_id.decode('UTF-8').split('.')[0] for tx_id in events_per_read['transcript_id']]\n tx_ids = [tx_id for tx_id in events_per_read['transcript_id']] \n tx_positions = events_per_read['transcriptomic_position']\n genomic_coordinate = list(itemgetter(*zip(tx_ids,tx_positions))(t2g_mapping)) # genomic_coordinates -- np structured array of 'chr','gene_id','genomic_position','kmer'\n genomic_coordinate = 
np.array(genomic_coordinate,dtype=np.dtype([('chr','<U2'),('gene_id','<U15'),('genomic_position','<i4'),('g_kmer','<U5')]))\n # ===== \n\n # Based on Ensembl, remove transcript version.\n\n events_per_read['transcript_id'] = tx_ids\n events_per_read = np.array(events_per_read,dtype=np.dtype([('transcript_id', 'S15'), ('transcriptomic_position', '<i8'), ('reference_kmer', 'S5'), ('norm_mean', '<f8')]))\n\n #\n\n events += [events_per_read]\n genomic_coordinates += [genomic_coordinate]\n n_events_per_read = len(events_per_read)\n# else:\n# print(read_index,len(events_per_read))\n\n events = np.concatenate(events)\n genomic_coordinates = np.concatenate(genomic_coordinates)\n \n # Sort and split # \n# idx_sorted = np.lexsort((events['reference_kmer'],genomic_coordinates['genomic_position'],genomic_coordinates['gene_id']))\n# key_tuples, index = np.unique(list(zip(genomic_coordinates['gene_id'][idx_sorted],genomic_coordinates['genomic_position'][idx_sorted],events['reference_kmer'][idx_sorted])),return_index = True,axis=0) #'chr',\n# y_arrays = np.split(events['norm_mean'][idx_sorted], index[1:])\n# # read_id_arrays = np.split(events['read_id'][idx_sorted], index[1:])\n# g_kmer_arrays = np.split(genomic_coordinates['g_kmer'][idx_sorted], index[1:])\n\n idx_sorted = np.argsort(genomic_coordinates['genomic_position'])\n unique_positions, index = np.unique(genomic_coordinates['genomic_position'][idx_sorted],return_index = True)\n y_arrays = np.split(events['norm_mean'][idx_sorted], index[1:])\n # read_id_arrays = np.split(events['read_id'][idx_sorted], index[1:])\n g_kmer_arrays = np.split(genomic_coordinates['g_kmer'][idx_sorted], index[1:])\n g_positions_arrays = np.split(genomic_coordinates['genomic_position'][idx_sorted], index[1:])\n\n # Prepare\n # print('Reformating the data for each genomic position ...')\n data = defaultdict(dict)\n # for each position, make it ready for json dump\n# data = dict(zip(key_tuples, y_arrays))\n\n asserted = True\n# for key_tuple,y_array,g_kmer_array in zip(key_tuples,y_arrays,g_kmer_arrays):\n for position,y_array,g_kmer_array,g_positions_array in zip(unique_positions,y_arrays,g_kmer_arrays,g_positions_arrays):\n# gene_id,position,kmer = key_tuple \n if (len(set(g_kmer_array)) == 1) and ('XXXXX' in set(g_kmer_array)) or (len(y_array) == 0):\n continue\n \n if 'XXXXX' in set(g_kmer_array):\n y_array = y_array[g_kmer_array != 'XXXXX'] \n assert len(y_array) == len(g_kmer_array) - (g_kmer_array=='XXXXX').sum()\n g_kmer_array = g_kmer_array[g_kmer_array != 'XXXXX'] \n \n try:\n assert len(set(g_kmer_array)) == 1\n assert {position} == set(g_positions_array)\n except:\n asserted = False\n break\n kmer = set(g_kmer_array).pop()\n\n data[position] = {kmer: list(y_array)} #,'read_ids': [read_id.decode('UTF-8') for read_id in read_id_array]}\n \n # write to file.\n log_str = '%s: %s' %(gene_id,asserted)\n\n with locks['json'], open(out_paths['json'],'a') as f:\n\n pos_start = f.tell()\n f.write('{')\n f.write('\"%s\":' %gene_id)\n ujson.dump(data, f)\n f.write('}\\n')\n pos_end = f.tell()\n\n with locks['index'], open(out_paths['index'],'a') as f:\n f.write('%s,%d,%d\\n' %(gene_id,pos_start,pos_end))\n \n with locks['readcount'], open(out_paths['readcount'],'a') as f: #todo: repeats no. 
of tx >> don't want it.\n n_reads = len(data_dict)\n f.write('%s,%d\\n' %(gene_id,n_reads))\n \n with locks['log'], open(out_paths['log'],'a') as f:\n f.write(log_str + '\\n')", "def filter_by_length(genes, transcripts, min_length):\n filtered_transcripts = {}\n filtered_genes = {}\n\n for transcript_id in transcripts:\n curr_transcript = transcripts[transcript_id]\n length = curr_transcript.get_length()\n\n if length >= min_length:\n filtered_transcripts[transcript_id] = curr_transcript\n gene_id = curr_transcript.gene_id\n if gene_id in genes:\n filtered_genes[gene_id] = genes[gene_id]\n\n return filtered_genes, filtered_transcripts", "def process_generic_specific_gene_lists(dict_genes, LV_matrix):\n model_genes = list(LV_matrix.index)\n\n processed_dict_genes = {}\n for gene_label, ls_genes in dict_genes.items():\n ls_genes_processed = list(set(model_genes).intersection(ls_genes))\n\n processed_dict_genes[gene_label] = ls_genes_processed\n\n return processed_dict_genes", "def convert_trsp_index(geneDictNonCoding, df, TR_index_dict):\n\n\n\tgeneDictCanon = OrderedDict()\n\t\n\tfor gene in geneDictNonCoding:\n\t\ttrDF = df.iloc[geneDictNonCoding[gene][0]:geneDictNonCoding[gene][1]]\n\t\ttrDFz = trDF.reset_index(drop=True)\n\t\t\n\t\ttrCount = 0\n\t\ttrDictLoc = OrderedDict()\n\t\t\n\t\tfor i in range(len(trDFz)):\n\t\t\tif trDFz.loc[i, 'feature'] == 'transcript':\n\t\t\t\ttr = trDFz.loc[i, 'transcript_id']\n\t\t\t\ttrdict = parse_entry(tr)\n\t\t\t\ttrName = trdict['transcript_id'][0]\n\t\t\t\ttrDictLoc[trName] = [trDFz.loc[i, 'chromStart'], trDFz.loc[i, 'chromEnd']]\n\t\t\t\ttrCount += 1\n\t\t\n\t\tif trCount > 1:\n# print gene, \"more than 1 trsp !!! \\n\"\n\t\t\t\n\t\t\trangeDict = OrderedDict() ## store the ranges, and take the longest\n\t\t\tfor key in trDictLoc:\n\t\t\t\ttrRange = len(range(int(trDictLoc[key][0]),int(trDictLoc[key][1])))\n\t\t\t\trangeDict[key] = trRange\n\t\t\t\t\n\t\t\tv=list(rangeDict.values())\n\t\t\tk=list(rangeDict.keys())\n\t\t\ttrOut = k[v.index(max(v))]\n# print trOut\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\t\t\t\n\t\t\t\n\n\t\telse: ## for genes with single transcripts\n\t\t\ttrOut = trDictLoc.keys()[0]\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\treturn geneDictCanon", "def process_leading_genes(f_path):\n with open(f_path, 'r') as f:\n lines = f.readlines()\n\n gene_list = lines[2].strip().split('\\t')[2:]\n gene_sets = []\n\n by_gene = defaultdict(list)\n\n for l in lines[3:]:\n parts = l.strip().split('\\t')\n gene_sets.append(parts[0])\n for p, gene in zip(parts[2:], gene_list):\n by_gene[gene].append(int(p))\n\n totals = [(k, sum(v)) for k, v in by_gene.items()]\n totals.sort(key=lambda x: x[1], reverse=True)\n\n return totals", "def all_possible_gene_transcription(dna: str):\n result = set()\n for dna in (dna, reverse_complement(dna)):\n rna = dna_to_rna(dna)\n start = find_motif(rna, START_CODON)\n for s in start:\n r = rna_to_protein(rna, start=s, end=True)\n if r:\n result.add(r)\n return result", "def perform_generation_transforms(mappings, df, output_file, dicts):\n out = open(output_file, 'w')\n for index in df.index:\n mappings_copy = copy.deepcopy(mappings)\n sub_maps = list(item_generator(mappings_copy, 'transforms'))\n for sub_map in filter(lambda x: isinstance(x, dict), sub_maps):\n sub_map_copy = copy.deepcopy(sub_map)\n for key in dict(filter(lambda x: 'transforms' in x[1],\n sub_map_copy.items())):\n for transform in filter(lambda x: 'type' in x and x['type'] in\n available_transforms,\n 
sub_map_copy[key]['transforms']):\n available_transforms[transform['type']](\n sub_map, key, transform, df, index, dicts)\n if key in sub_map and 'transforms' in sub_map[key]:\n del sub_map[key]['transforms']\n for sub_map in filter(lambda x: isinstance(x, list), sub_maps):\n sub_map_copy = copy.deepcopy(sub_map)\n list_index = 0\n for list_item in filter(lambda x: isinstance(x, dict) and\n 'transforms' in x, sub_map_copy):\n mapping_size = len(sub_map)\n for transform in filter(lambda x: 'type' in x and\n x['type'] in available_transforms,\n list_item['transforms']):\n if not list_index >= len(sub_map):\n available_transforms[transform['type']](\n sub_map, list_index, transform, df, index, dicts)\n if (not list_index >= len(sub_map) and\n 'transforms' in sub_map[list_index]):\n del sub_map[list_index]['transforms']\n if mapping_size == len(sub_map):\n list_index += 1\n fill_json_values('sourceCol', mappings_copy, df, index)\n out.write(json.dumps(mappings_copy) + '\\n')\n out.close()\n return df", "async def get_transcripts(self, gene_tokens: List, classification: Classification,\n errors: List) -> Optional[List[str]]:\n return []", "def active_tu(active):\n active = list(active)\n t_units = list({tu for gene in active for tu in gene.transcription_units})\n return t_units", "def extract_number_target_genes2(d, g, miranda_fnm):\n\tprint 'extract targets genes2'\n\toutput_nm = \"%s_counts2.txt\"%(miranda_fnm[:-4])\n\toutput = open(output_nm, 'w')\n\toutput.write(\"miRNA\\ttotal_target_genes\\ttarget_genes_down\\ttarget_genes\\n\")\n\tfor key in d:\n\t\tif len(d[key]) > 0:\n\t\t\t#print key, len(d[key])\n\t\t\toutput.write(\">>%s\\t%s\\t%s\\n\"%(key, len(d[key]),len(g[key])))\n\t\t\tfor gene in g[key]:\n\t\t\t\t#print gene\n\t\t\t\toutput.write(\"%s\\n\"%(gene[0]))\n\n\toutput.close()", "def gtf_to_transcript_exons(gtf, transcript_type):\n gft = HTSeq.GFF_Reader(gtf)\n\n transcripts = {}\n\n for gtf_line in gft:\n if gtf_line.type == 'exon':\n try:\n tr_id = gtf_line.attr['transcript_id']\n tr_type = gtf_line.attr['transcript_biotype']\n except:\n sys.stderr.write(f\"Problem with: {gtf_line}. 
Exiting.{os.linesep}\")\n sys.exit(1)\n\n if transcript_type != \"all\":\n if tr_type != transcript_type:\n continue\n\n if tr_id not in transcripts:\n transcripts[tr_id] = [gtf_line]\n else:\n transcripts[tr_id].append(gtf_line)\n\n return transcripts", "async def test_get_transcripts_from_gene(test_db):\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 2145, 2145)\n assert len(resp) == 32\n\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 140453136,\n 140453136)\n assert len(resp) == 0", "def get_transcript_gene_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.GeneId)))", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def parse_transcripts(trans):\n s = SeqIO.parse(trans, 'fasta')\n seq_dict = SeqIO.to_dict(s)\n # Remove the _whatever at the end\n seq_dict_nosuff = {}\n for seqid in seq_dict:\n seq_dict_nosuff[seqid.split('_')[0]] = seq_dict[seqid]\n return seq_dict_nosuff", "def sum_list(lst_formula_maps):\n result_map_result = {}\n map_zero = lst_formula_maps[0]\n for formula_id in map_zero:\n all_exist = True\n formula_vector = map_zero[formula_id]\n for lst_index in range(1, len(lst_formula_maps)):\n if lst_formula_maps[lst_index][formula_id] is None:\n all_exist = False\n break\n else:\n formula_vector_temp = lst_formula_maps[lst_index][formula_id]\n formula_vector = formula_vector + formula_vector_temp\n if all_exist:\n result_map_result[formula_id] = formula_vector\n return result_map_result", "def add_ta_alias_to_map(ta_aliases, ta_map):\n\n for tup in ta_aliases:\n ta1, ta2 = tup\n s = ta_map[ta1]\n s.update(ta_map[ta2])\n # point key of all elements of the set to the same set.\n for x in s:\n ta_map[x] = s\n\n return ta_map", "def parse_transcripts(transcript_lines):\n LOG.info(\"Parsing transcripts\")\n transcripts = parse_ensembl_transcripts(transcript_lines)\n\n # Since there can be multiple lines with information about the same transcript\n # we store transcript information in a dictionary for now\n parsed_transcripts = {}\n # Loop over the parsed transcripts\n for tx in transcripts:\n tx_id = tx[\"ensembl_transcript_id\"]\n ens_gene_id = tx[\"ensembl_gene_id\"]\n\n # Check if the transcript has been added\n # If not, create a new transcript\n if not tx_id 
in parsed_transcripts:\n tx_info = {\n \"chrom\": tx[\"chrom\"],\n \"transcript_start\": tx[\"transcript_start\"],\n \"transcript_end\": tx[\"transcript_end\"],\n \"mrna\": set(),\n \"mrna_predicted\": set(),\n \"nc_rna\": set(),\n \"ensembl_gene_id\": ens_gene_id,\n \"ensembl_transcript_id\": tx_id,\n }\n parsed_transcripts[tx_id] = tx_info\n\n tx_info = parsed_transcripts[tx_id]\n # Add the ref seq information\n if tx.get(\"refseq_mrna_predicted\"):\n tx_info[\"mrna_predicted\"].add(tx[\"refseq_mrna_predicted\"])\n if tx.get(\"refseq_mrna\"):\n tx_info[\"mrna\"].add(tx[\"refseq_mrna\"])\n if tx.get(\"refseq_ncrna\"):\n tx_info[\"nc_rna\"].add(tx[\"refseq_ncrna\"])\n\n return parsed_transcripts", "async def test_get_mane_transcripts_from_genomic_pos(test_db):\n resp = await test_db.get_transcripts_from_genomic_pos(\"NC_000007.14\",\n 140753336)\n assert set(resp) == {\n \"NM_001354609.1\", \"NM_001354609.2\", \"NM_001374244.1\", \"NM_001374258.1\",\n \"NM_001378467.1\", \"NM_001378468.1\", \"NM_001378469.1\", \"NM_001378470.1\",\n \"NM_001378471.1\", \"NM_001378472.1\", \"NM_001378473.1\", \"NM_001378474.1\",\n \"NM_001378475.1\", \"NM_004333.4\", \"NM_004333.5\", \"NM_004333.6\"\n }\n\n # invalid pos\n resp = await test_db.get_transcripts_from_genomic_pos(\"NC_000007.14\",\n 150753336)\n assert resp == []\n\n # invalid ac\n resp = await test_db.get_transcripts_from_genomic_pos(\"NC_000007.14232\",\n 140753336)\n assert resp == []", "def calculate_distance(geneid, genes, tes):\n # Get which chromosome\n for c in genes:\n if geneid in genes[c]:\n chromosome = c\n break\n # Get the gene position\n genestart, geneend = genes[chromosome][geneid]\n # if the gene chromosome does not have any TEs, return NA\n if chromosome not in tes:\n return ('NA', 'NA', 'NA', 'NA')\n # Then get the TE that is closest downstream. We do this by iterating\n # forwards, then breaking when we find a TE that starts after the end of\n # the gene.\n for downstream in sorted(list(tes[chromosome].iteritems()), key=lambda x: x[1][0]):\n if downstream[1][0] > geneend:\n break\n # And get the TE that is closest upstream. 
We do this by doing the same\n # strategy as above, but in reverse\n for upstream in reversed(sorted(list(tes[chromosome].iteritems()), key=lambda x: x[1][0])):\n if upstream[1][1] < genestart:\n break\n # Then calculate the distances\n # Gene start - TE end and TE start - gene end\n dist_upstream = genestart - upstream[1][1]\n dist_downstream = downstream[1][0] - geneend\n # then return it all\n return (upstream[0], downstream[0], dist_upstream, dist_downstream)", "def active_genes_and_tf(active):\n active = list(active)\n t_factors = list({gene.regulatory_product for gene in active if\\\n isinstance(gene.regulatory_product, TranscriptionFactor)})\n return t_factors + active", "def tallying_genes():\n #Creating a tallying Mechanism of genes with multiple sequences in file and\n # an output file for future alignment of sequences \n blast_hit_results = open('blast_hits_report.txt', 'r')\n gene_dict={}\n\n for line in blast_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n #Test to see if organism in dictionary\n verdict = gene_dict.get(data[6])\n \n if str(verdict) == \"None\":\n #creating new entry\n key = data[6]\n seq_info=str(data[0])+\"|\"+str(data[1])\n counter = 1\n #Value[Counts, Trimmed_Length, Blast Length, Blast_Score, Blast_Percent_Identity]\n value=[data[5], counter, [seq_info]]\n gene_dict.update({key:value})\n else:\n #Fills dictionary based on organism name\n seq_info=str(data[0])+\"|\"+str(data[1])\n gene_dict[data[6]][1]+=1\n gene_dict[data[6]][2].append(seq_info)\n blast_hit_results.close()\n return(gene_dict)", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models" ]
[ "0.59052914", "0.5718111", "0.57166183", "0.5713127", "0.5624114", "0.55903983", "0.5565028", "0.5310848", "0.53084624", "0.5292143", "0.5210532", "0.5177025", "0.514381", "0.51284915", "0.5062096", "0.5026479", "0.5016952", "0.5011995", "0.49969754", "0.49333945", "0.49022913", "0.48854688", "0.48699713", "0.4863825", "0.48356953", "0.48327962", "0.48269907", "0.48160076", "0.4798709", "0.47703362" ]
0.69606656
0
Records a param measurement and returns it.
def measure(self, timestamp, param): if param in self.faulty: value = random.randint(*self.FAULTY[param]) else: value = self.patient.measure(param) self.__buffer[param].append(Measurement(timestamp, value)) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError", "def get_last_measurement(self, param):\n return self.__buffer[param][-1]", "def get_measurements(self, param):\n return tuple(self.__buffer[param])", "def log_param(self, name: str, value):\n self.params[name] = value\n\n self._sync_log_event()", "def log_param(key, value):\n mlflow.log_param(key, value)", "def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)", "def addParameter(self, param_id, val, units=\"per_second\"):\n\n k = self.model.createParameter()\n self.check(k, \"create parameter k\")\n self.check(k.setId(param_id), \"set parameter k id\")\n self.check(k.setConstant(True), 'set parameter k \"not constant\"')\n self.check(k.setValue(val), \"set parameter k value\")\n self.check(k.setUnits(units), \"set parameter k units\")\n return k", "def add_measurement(self):\n key, ok = QInputDialog.getText(self, 'Add measurement', 'Enter the parameter to measure:')\n if ok:\n if key in self.mgr.obj.measurements:\n print(\"parameter {:s} is already measured by the survey\".format(key))\n return\n idx = self.measurementsListWidget.currentRow()+1\n self.mgr.obj.add_measurement(key, idx=idx)\n self.load_measurements()\n self.measurementsListWidget.setCurrentRow(idx)", "def get_param_duration(param):\n\n # dummy value\n value = rospy.Duration(1)\n\n try:\n # only a default value in case the param gets fuzzed.\n value = rospy.Duration(get_param_num(param))\n except ValueError:\n err_msg = (\n \"Param %s has the invalid value '%s'.\"\n % (param, rospy.get_param(param)))\n rospy.logerr(err_msg)\n rospy.signal_shutdown(err_msg)\n value = rospy.Duration(1)\n return value", "def param(self, *args, **kwargs):\n return self.options.param(*args,**kwargs)", "def param(self):\n return self._param", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def get_value(self, param, freq):\n p = self.get_parameter(param)\n value = p.get_value(freq)\n return value", "def record(self, point):\n for var, value in zip(self.varnames, self.f(point)):\n self.samples[var].append(value)\n return self", "def add_param(self, param):\n self.params.append(param)\n return self", "def define_measurement(self, trace: int, parameter: str) -> None:\n if trace not in range(1, self.ntraces + 1):\n self.ntraces = trace\n\n self.write(f\"CALC:PAR{trace}:DEF {parameter}\")", "def posterior_sample_parameter(self, parameter):\n pass", "def patience_param(x):\n # -- TODO: make this do something!\n return x", "def param(*args, **kwargs):\n p = Param(*args, **kwargs)\n\n def decorator(func):\n func.param = p\n return func\n\n return decorator", "def get_custom_param(plot):\n return Plot.get_custom_param(plot)", "def add_param(self, param):\n self._params.append(param)\n self.add_decompostion(param)", "def get_param(self, param):\n return self.params.get(param, None)", "def add(self, param):\n 
self._data.add(param)", "def get_measurement_parameter(self, trace: int) -> str:\n if trace not in range(1, 5):\n raise ValueError(\"Trace must be between 1 and 4\")\n\n return self.query(f\"CALC:PAR{trace}:DEF?\")", "def record(self, config, value, time_ms):\n raise NotImplementedError", "def parameter(self, parameter):\n def decorator(fn):\n swag = self.get_swag(fn)\n swag.setdefault('parameters', []).append(\n core.Parameter(**parameter))\n self.set_swag(fn, swag)\n return fn\n return decorator", "def _add_or_update_measurement(self,eq,meas_type,mplane_param2value,period):\r\n meas = self._pvsr.create_pvsr_object(\"Measurement\")\r\n meas.ParentId = eq.Id\r\n meas.Type = meas_type\r\n if \"index_mplane_name\" in self._meas:\r\n if self._meas[\"index_mplane_name\"] not in mplane_param2value:\r\n raise ValueError(\"Missing {0} value\".format(self._meas[\"index_mplane_name\"]))\r\n meas.Index = mplane_param2value[self._meas[\"index_mplane_name\"]]\r\n else:\r\n meas.DescriptionToShow = self._meas[\"name\"] + \" \" + self._pvsr_meas_types[meas_type][\"Name\"]\r\n \r\n measA = self._pvsr.listMeasurements(meas)\r\n if len(measA) == 0:\r\n if \"index_mplane_name\" not in self._meas:\r\n meas.Index = self._meas[\"name\"]\r\n measA = self._pvsr.listMeasurements(meas)\r\n \r\n add2 = None\r\n \r\n if len(measA) == 0:\r\n #add\r\n if self._verb==mplane.model.VERB_QUERY:\r\n if \"index_mplane_name\" in self._meas:\r\n raise ValueError(\"The measurement does not exists: Index={0}\".format(meas.Index))\r\n else:\r\n raise ValueError(\"The measurement does not exists: Name={0}\".format(meas.DescriptionToShow))\r\n \r\n if \"index_mplane_name\" in self._meas:\r\n if eq.CollectorType == 'c':\r\n meas.DescriptionToShow = mplane_param2value[self._meas[\"index_mplane_name\"]] + \" \" + self._pvsr_meas_types[meas_type][\"Name\"]\r\n else:\r\n meas.DescriptionToShow = self._meas[\"name\"] + \" \" + self._pvsr_meas_types[meas_type][\"Name\"]\r\n \r\n if \"uda_constants\" in self._meas:\r\n for uda,value in self._meas[\"uda_constants\"].items():\r\n param=self._pvsr.create_pvsr_object(\"Parameter\")\r\n param.Name = uda\r\n param.Value = value\r\n meas.Parameter.append(param)\r\n\r\n for mplane_param,uda in self._mplane2uda.items():\r\n if mplane_param in mplane_param2value and mplane_param2value[mplane_param] != \"\":\r\n param=self._pvsr.create_pvsr_object(\"Parameter\")\r\n param.Name = uda\r\n param.Value = mplane_param2value[mplane_param]\r\n meas.Parameter.append(param)\r\n elif self._uda_name2uda[uda].Required == \"Yes\":\r\n raise ValueError(\"Missing required parameter: {0}\".format(mplane_param))\r\n \r\n logging.info(\"Creating measurement, eq: {0}, type: {1}, index: {2}, name: {3}\".format(eq.Name,meas.Type,meas.Index,meas.DescriptionToShow))\r\n \r\n meas.Switched = \"No\"\r\n meas.RetainRawData = 365\r\n meas.IntervalInSec = period\r\n \r\n add2 = 1\r\n meas = self._pvsr.addMeasurement(meas)\r\n else:\r\n #update\r\n meas = measA[0]\r\n logging.info(\"Measurement already exists: eq: {0}, type: {1}, index: {2}, name: {3}\".format(eq.Name,meas.Type,meas.Index,meas.DescriptionToShow))\r\n \r\n need_mod = False\r\n meas_param_name2value = {}\r\n if hasattr(meas,\"Parameter\"):\r\n for i in range(len(meas.Parameter)):\r\n meas_param_name2value[meas.Parameter[i].Name]=meas.Parameter[i].Value\r\n\r\n if \"check_udas\" in self._meas and self._meas[\"check_udas\"] == False:\r\n pass\r\n else:\r\n for mplane_param,uda in self._mplane2uda.items():\r\n if mplane_param in mplane_param2value and 
mplane_param2value[mplane_param] != \"\":\r\n if uda not in meas_param_name2value or meas_param_name2value[uda] != mplane_param2value[mplane_param]:\r\n if uda not in meas_param_name2value:\r\n logging.warn(\"Parameter mismatch: {0}: NULL != {1}\".format(uda,mplane_param2value[mplane_param]))\r\n else:\r\n logging.warn(\"Parameter mismatch: {0}: {1} != {2}\".format(uda,meas_param_name2value[uda],mplane_param2value[mplane_param]))\r\n index2remove=None\r\n for i in range(len(meas.Parameter)):\r\n if meas.Parameter[i].Name == uda:\r\n index2remove = i\r\n break\r\n del meas.Parameter[index2remove]\r\n need_mod = True\r\n param=self._pvsr.create_pvsr_object(\"Parameter\")\r\n param.Name = uda\r\n param.Value = mplane_param2value[mplane_param]\r\n meas.Parameter.append(param)\r\n else:\r\n if uda in meas_param_name2value:\r\n index2remove=None\r\n for i in range(len(meas.Parameter)):\r\n if meas.Parameter[i].Name == uda:\r\n index2remove = i\r\n break\r\n if index2remove is not None:\r\n logging.warn(\"Parameter mismatch: {0}: {1} != NULL\".format(uda,meas_param_name2value[uda]))\r\n need_mod = True\r\n del meas.Parameter[index2remove]\r\n \r\n if meas.IntervalInSec != period:\r\n need_mod = True\r\n meas.IntervalInSec = period\r\n logging.warn(\"Parameter mismatch: IntervalInSec: {0} != {1}\".format(meas.IntervalInSec,period))\r\n \r\n if need_mod:\r\n if self._verb==mplane.model.VERB_QUERY:\r\n raise ValueError(\"The measurement parameters do not match: Name={0}\".format(meas.DescriptionToShow))\r\n \r\n logging.warn(\"Modifying measurement: eq: {0}, type: {1}, index: {2}, name: {3}\".format(eq.Name,meas.Type,meas.Index,meas.DescriptionToShow))\r\n meas = self._pvsr.modMeasurement(meas)\r\n add2 = 2\r\n else:\r\n add2 = 0\r\n \r\n return (meas,add2)", "def metric_function_parameter(parameter_name, target_value, metric_name,\n return_variable_name=None):\n metric_func = construct_metric_function(metric_name)\n if return_variable_name is None:\n return_variable_name = parameter_name\n\n def custom_metric_function(sampler):\n cur_parameter = getattr(sampler.parameters, parameter_name)\n metric_value = metric_func(cur_parameter, target_value)\n metric = {'variable': return_variable_name,\n 'metric': metric_name,\n 'value': metric_value\n }\n return metric\n return custom_metric_function", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)", "def add_parameter(self,\n name, # The name of the parameter\n scaling=None, # The type of scaling to be used for the parameter\n type=\"int\", # The type of the parameter, such as float\n min=0, # The minimum value of the parameter\n max=100, # The maximum value of the parameter\n significance=1, # The smallest significant step size\n value=None, # The value or value parameters\n distribution=None): # The distribution of the parameter\n config = {\"scaling\" : scaling, \n \"type\": type,\n \"min\": min, \n \"max\": max, \n \"significance\": significance,\n \"value\": value,\n \"distribution\": distribution}\n self.param_names.append(name)\n self.param_settings.append(config)" ]
[ "0.64668816", "0.61866486", "0.61315846", "0.5945125", "0.59324706", "0.579286", "0.578003", "0.5778229", "0.57359004", "0.5732038", "0.57237446", "0.5661884", "0.5656225", "0.5632293", "0.56025463", "0.5585704", "0.55407315", "0.5513971", "0.5506482", "0.5504652", "0.54864067", "0.548124", "0.54226315", "0.54126036", "0.5411881", "0.53936857", "0.5392172", "0.53805375", "0.5374186", "0.53646225" ]
0.64251155
1
Gets param last measurement.
def get_last_measurement(self, param): return self.__buffer[param][-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_percept(self):\n return self.percept", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def param(self):\n return self._param", "def getLatestMeasurement(self): \n return self.measurement[len(self.measurement)-1]", "def get_output(self, last = 1):\n\t\tif last == -1:\n\t\t\ttmp = self.out_param[::]\n\t\t\tself.out_param = []\n\t\t\treturn tmp\n\t\treturn self.out_param[-last:]", "def last_value(self):\n return self.samples[-1]", "def get_last_sample(self) -> InternalSample:", "def last_value(self):\n return self._waveforms[-1].last_value", "def get_last_saved_estimation(self):\n return None", "def LastParameter(*args):\n return _Geom2dLProp.Geom2dLProp_Curve2dTool_LastParameter(*args)", "def get_current_param(self, t=None):\n if self.current_context is None:\n raise Exception(\"The MAB game is not started.\")\n \n return self.get_param(self.current_context)", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def _param(self) ->nn.Parameter:\n return next(self.parameters())", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "def get_value(self):\n return self.last_value", "def last_value(self):\n return self._last_value", "def pcurrent(self):\n return self.pointlist[-1]", "def getLast(self):\r\n return self._data[-1]", "def get_last_result(self):\n return self.last_result", "def get_param(self, param):\n return self.params.get(param, None)", "def last_voltage(self):\n return self._last_voltage", "def last_value(self):\n return self._stop", "def get_measurements(self, param):\n return tuple(self.__buffer[param])", "def getLatestSpectrumMeasurements(self): \n return self.spectrum[len(self.spectrum)-1]", "def last_provided(self):\n last = self.current()\n if last == 0:\n return None\n return self._cnt2bc(last - 1)", "def last(self, trace):\n return trace[-1]", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def getLast(self):\n return self.dataBuffer[len(self.dataBuffer) - 1]", "def last_value(self):\n return self._value", "def last_value(self):\n return 0" ]
[ "0.69294596", "0.6815849", "0.67619956", "0.673588", "0.67033076", "0.66313666", "0.6617073", "0.6605436", "0.65295345", "0.648252", "0.6449733", "0.64441687", "0.6424671", "0.6354086", "0.63483137", "0.6319382", "0.63120097", "0.62854356", "0.62735873", "0.6260872", "0.62413585", "0.6239695", "0.6229598", "0.6192552", "0.6181082", "0.6167539", "0.6139105", "0.613291", "0.611809", "0.61137587" ]
0.83966666
0
Initial profanity check using profanity_check
def profanityCheck(text): return predict_prob([text])[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_tokens_with_profanity():\n list_responses = ['test this code', ' for bad words', 'such as shit']\n check = edurate_gensim.create_tokens(list_responses)\n assert check == [['test', 'code'], ['bad', 'words']]\n assert (\"shit\" in check) is False", "def verify():", "def main():\n from argparse import ArgumentParser\n description = \"Check whether an OKCupid username is taken.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"usernames\",\n metavar=\"username\", type=str, nargs=\"+\",\n help=\"a username to check\")\n parser.add_argument(\"-v\", \"--verbose\",\n dest=\"verbose\", action=\"store_const\",\n const=True, default=False,\n help=\"verbose output including recommendations\")\n parser.add_argument(\"-s\", \"--shortcircuit\",\n dest=\"shortcircuit\", action=\"store_const\",\n const=True, default=False,\n help=\"stop on first success\")\n parser.add_argument(\"-C\", \"--nocolor\",\n dest=\"nocolor\", action=\"store_const\",\n const=True, default=False,\n help=\"plain, unstyled output\")\n parser.add_argument(\"-q\", \"--quiet\",\n dest=\"quiet\", action=\"store_const\",\n const=True, default=False,\n help=\"no output (return 0 if at least one successful)\")\n args = parser.parse_args()\n if args.nocolor:\n colorama.init(strip=True, convert=False)\n else:\n colorama.init()\n if not run_check_usernames(args):\n sys.exit(1)", "def check_vulnerability(self):\n\t\tpass", "def profanity(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n profane_terms = profanity_terms.profanity\n not_in = []\n message = message_data['message'].replace(' ', '').lower()\n for i in itertools.takewhile(lambda x: x not in message, profane_terms):\n not_in.append(i)\n\n # if the lengths of the two lists match, the term is not profane\n return len(not_in) != len(profane_terms)", "def __profanity_scan(self, title, text):\n profane_list = set(PROFANITY)\n text_words = nltk.word_tokenize(text)\n text_count = 0\n title_count = 0\n for word in text_words:\n if word.lower() in profane_list:\n text_count += 1\n for word in title.split():\n if word.lower() in profane_list:\n title_count += 1\n return title_count, text_count", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def main():\n check_slugs()\n check_identifiers()", "def check():\n hokusai.check()", "def check():", "def begin_subjunctiveimp_are_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the imperfect \"\n \"subjunctive tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(are_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"are\":\n answer = are_subjunctiveimp_quiz(verb, pronoun)\n checker = input(f'Tense: Congiuntivo Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? 
y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def begin_imperfect_are_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the imperfect \"\n \"tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(are_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"are\":\n answer = are_imperfect_quiz(verb, pronoun)\n checker = input(f'Tense: Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def yes_straw_warts():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"straw warts\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def test_sanitization_function(self):\n for (broken, clean) in self.needSanitization:\n self.assertEquals(clean, sanitizeFeedback(broken))\n\n for test in self.noSanitizingNeeded:\n self.assertEquals(test, sanitizeFeedback(test))", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def test_naked_domain(create_user):\n emails = [\"[email protected]\"]\n patterns = [\"bar.com\"]\n assert create_user.preprocess_pattern(emails, patterns) == True\n fail_emails = [\"[email protected]\"]\n assert create_user.preprocess_pattern(fail_emails, patterns) == False", "def no_banana():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"banana\", prompt=False\n ).stdout(\"NO\", regex=False\n ).exit()", "def begin_subjunctiveimp_ire_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the imperfect \"\n \"subjunctive tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(ire_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"ire\":\n answer = ire_subjunctiveimp_quiz(verb, pronoun)\n checker = input(f'Tense: Congiuntivo Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? 
y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def run_check_username(args, username):\n if not args.quiet:\n colored_username = Style.BRIGHT + username + Style.RESET_ALL\n print \"Checking '{}'..\".format(colored_username),\n result = check_username(username)\n if not result.valid:\n if not args.quiet:\n print Fore.RED + \"invalid\" + Style.RESET_ALL\n elif result.available:\n if not args.quiet:\n print Fore.GREEN + \"available\" + Style.RESET_ALL\n else:\n if not args.quiet:\n print Fore.RED + \"taken\" + Style.RESET_ALL\n if args.verbose and result.recommendations:\n for suggestion in result.recommendations:\n print \"\\t\" + Fore.YELLOW + suggestion + Style.RESET_ALL\n return result.available", "def begin_imperfect_ire_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the imperfect \"\n \"tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(ire_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"ire\":\n answer = ire_imperfect_quiz(verb, pronoun)\n checker = input(f'Tense: Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def begin_subjunctivepr_are_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. Conjugate it in the present \"\n \"subjunctive tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(are_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"are\":\n answer = are_subjunctivepr_quiz(verb, pronoun)\n checker = input(f'Tense: Congiuntivo Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def verify(self):", "def main():\n user_input = user_input_state()\n check_user_input(user_input)", "def yes_a():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"a\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def testNSESanityChecks(self):\n self.assertEqual(100, self.c.get_species_richness())\n self.assertEqual(67, self.c2.get_species_richness())", "def yes_abba():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"abba\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def init_func(unicore_fuzz, uc):\n pass", "def begin_subjunctiveimp_ere_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. 
Conjugate it in the imperfect \"\n \"subjunctive tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(ere_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"ere\":\n answer = ere_subjunctiveimp_quiz(verb, pronoun)\n checker = input(f'Tense: Congiuntivo Imperfetto \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def yes_tenet():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"tenet\", prompt=False\n ).stdout(\"YES\", regex=False\n ).exit()", "def check_auth():" ]
[ "0.5606062", "0.54521835", "0.5415538", "0.5389216", "0.53835446", "0.53203183", "0.52006304", "0.51796556", "0.512445", "0.51217043", "0.5049988", "0.49923262", "0.496746", "0.4898047", "0.48900947", "0.48552576", "0.48544675", "0.4842774", "0.48151883", "0.48013282", "0.48003983", "0.47937477", "0.47889596", "0.4765915", "0.47656935", "0.4762366", "0.47143617", "0.4710295", "0.47092742", "0.46962693" ]
0.60663515
0
Returns a DrsClient. This will delete any documents, aliases, or users made by this client after the test has completed. Currently the default user is the admin user. Runs once per test.
def drs_client(indexd_server): try: user = create_user("user", "user") except Exception: user = ("user", "user") client = DrsClient(baseurl=indexd_server.baseurl, auth=user) yield client clear_database()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_drf_client(admin_user):\n client = APIClient()\n client.force_authenticate(user=admin_user)\n return client", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def get_client(self):\n return self.client", "def CreateClient():\n client = gdata.docs.client.DocsClient(source=SampleConfig.APP_NAME)\n client.http_client.debug = SampleConfig.DEBUG\n # Authenticate the user with CLientLogin, OAuth, or AuthSub.\n try:\n gdata.sample_util.authorize_client(\n client,\n service=client.auth_service,\n source=client.source,\n scopes=client.auth_scopes\n )\n except gdata.client.BadAuthentication:\n exit('Invalid user credentials given.')\n except gdata.client.Error:\n exit('Login Error')\n return client", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def user_drf_client(user):\n client = APIClient()\n client.force_authenticate(user=user)\n return client", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def get_client():\n\n return MongoClientManager().client", "def delete_client():\n preserve_cache = request.args.get('preserve_cache', False)\n delete_client(g.client_id, preserve_cache)\n return jsonify({'Success': True})", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def decapod_client(get_decapod_client):\n return get_decapod_client()", "def user_client(user):\n client = Client()\n client.force_login(user)\n return client", "def test_delete_o_auth_client(self):\n pass", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def get_client():\n return storage.Client(project=project_id)", "def test_delete_client(self):\n pass", "def staff_client(staff_user):\n client = Client()\n client.force_login(staff_user)\n return client", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def _get_user_client(self):\n return api.OAuthClient(settings.CLIENT_ID, settings.CLIENT_SECRET, settings.USER, settings.PASSWORD)", "def affirm_client(self):\n if self.user_name and self.password and self.host:\n uri = f'mongodb://{self.user_name}:{self.password}@\\\n {self.host}/{self.db_name}'\n client = MongoClient(uri)\n elif self.host:\n client = MongoClient(self.host)\n else:\n client = MongoClient()\n return client[self.db_name]", "def client():\n return Client(**common_data.AUTH_ARGS)", "def client(self):\n\n return self._client", "def base_client(self):\n return self._client", "def test_05_delete_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n 
client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except ClientAlreadyExistsException as error:\n print(error.get_message())\n\n except ClientNotFoundException as error:\n print(error.get_message())\n\n except DBException as error:\n print(error.get_message())", "def Client(self):\n return self._client", "def client(self):\n return self._client", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def test_client_delete(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n ag.client_name = \"client-name\"\n ag.api_key = \"some api key\"\n ag.api_secret = \"some secret\"\n\n # Create client.\n ag.clients_delete()\n\n assert ag.api_key == \"\"\n assert ag.api_secret == \"\"", "def client(self) -> 'BaseClient':\n return self" ]
[ "0.62618375", "0.5801828", "0.57856905", "0.57658434", "0.5730438", "0.5724532", "0.56575555", "0.5643841", "0.5600832", "0.5596945", "0.5596945", "0.5586577", "0.55524814", "0.55345035", "0.55280924", "0.55232245", "0.5517509", "0.5496027", "0.54853994", "0.545057", "0.54008114", "0.5388988", "0.538087", "0.5368203", "0.5366162", "0.5358505", "0.5334062", "0.5308237", "0.5296569", "0.52824175" ]
0.61927664
1
Send 200 OK response, and set server.stop to True
def do_QUIT(self): self.send_response(200) self.end_headers() self.server.stop = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()", "def send_200_resp(self, response, content_type):\n self.send_response(200)\n self.send_header(\"Content-type\", content_type)\n if response is not None:\n resplen = str(len(response))\n else:\n resplen = 0\n self.send_header(\"Content-length\", resplen)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n if response is not None:\n self.wfile.write(response)", "def serve_response(self):\n try:\n print self.path\n response_info = self.responses_qeues[self.path.split(\"?\").pop(0)].pop(0)\n print response_info\n except:\n self.send_response(404)\n self.end_headers()\n return\n\n \"\"\"If response_info has also a delay set, wait the time specified.\"\"\"\n if \"delay\" in response_info:\n time.sleep(response_info[\"delay\"])\n\n \"\"\"Send the status code.\"\"\"\n status_code = response_info[\"status_code\"]\n self.send_response(status_code)\n\n \"\"\"Send specific headers, if any.\"\"\"\n if \"headers\" in response_info:\n headers = response_info[\"headers\"]\n for header_name in headers.keys():\n self.send_header(header_name, headers.get(header_name))\n self.end_headers()\n\n \"\"\"Send the body, if any.\"\"\"\n if \"body\" in response_info:\n body = response_info[\"body\"]\n self.wfile.write(json.dumps(body))", "def _HandleShutdown(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write('API Server Quitting')\n self.server.shutdown()", "def serve_forever(self, unused_parameter=0.5):\r\n self.stop = False\r\n while not self.stop:\r\n self.handle_request()", "def _send_response(self, request):\n request_line, headers = split_http_request(request)\n if DEBUG_LEVEL > 1:\n print \"Request: {}\\nHeaders: {}\".format(request_line, headers)\n\n request = HTTPRequest.HTTPRequest(request_line, headers, DEBUG_LEVEL)\n\n uri = request.get_uri_with_no_params()\n uri = uri[1:] if uri[0] == \"/\" else uri\n\n if uri in server_functions.AVAILABLE_FUNCTIONS.keys():\n response, flag = server_functions.\\\n AVAILABLE_FUNCTIONS[uri](request.get_params())\n self._client.send(response.build_response())\n return flag\n\n result = self._check_status_errors(request)\n if result == -1:\n return False\n elif result == 1:\n return True\n\n full_file_path = self._get_full_path(request)\n\n requested_file = open(full_file_path, \"r\")\n data = requested_file.read()\n requested_file.close()\n\n headers = HTTPHeaders.HTTPHeaders()\n public_response_functions.add_default_headers(headers)\n headers[\"Content-Length\"] = str(len(data))\n\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=200,\n phrase=\"OK\", headers=headers)\n self._client.send(response.build_response() + data)\n return True", "def end():\n\tdata = bottle.request.json\n\t#print(\"END:\", json.dumps(data))\n\treturn HTTPResponse(status=200)", "def server_exit():\n return", "def ping():\r\n return make_response(\"pong!\", 200)", "def ping_response():\n\n return Response(\"ok\", status=200)", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def root():\n return Response(\"It's alive!\", status=200)", "def continue_server():\n update_server_status({'ready': 
True})", "def test_stop_interrupts_serve():\n httpserver = HTTPServer(\n bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT),\n gateway=Gateway,\n )\n\n httpserver.prepare()\n serve_thread = threading.Thread(target=httpserver.serve)\n serve_thread.start()\n\n serve_thread.join(0.5)\n assert serve_thread.is_alive()\n\n httpserver.stop()\n\n serve_thread.join(0.5)\n assert not serve_thread.is_alive()", "def ping():\n\treturn HTTPResponse(status=200)", "def respond(self):\r\n response = self.wsgi_app(self.environ, self.start_response)\r\n try:\r\n for chunk in response:\r\n # \"The start_response callable must not actually transmit\r\n # the response headers. Instead, it must store them for the\r\n # server or gateway to transmit only after the first\r\n # iteration of the application return value that yields\r\n # a NON-EMPTY string, or upon the application's first\r\n # invocation of the write() callable.\" (PEP 333)\r\n if chunk:\r\n self.write(chunk)\r\n stackless.schedule()\r\n finally:\r\n if hasattr(response, \"close\"):\r\n response.close()\r\n if (self.ready and not self.sent_headers):\r\n self.sent_headers = True\r\n self.send_headers()\r\n if self.chunked_write:\r\n self.sendall(\"0\\r\\n\\r\\n\")", "def write_empty_response(self, status_code):\n self.send_response(status_code)\n self.end_headers()", "def _send_response(self, req, code=200, body='', headers=None):\n req.send_response(code)\n headers = headers or {}\n headers.setdefault('Content-Length', len(body))\n for header in headers:\n req.send_header(header, headers[header])\n req.write(body)\n raise RequestDone", "def send_response(self, code, message=None, size='-'):\n BaseHTTPRequestHandler.send_response(self, code, message)\n BaseHTTPRequestHandler.log_request(self, code, size)", "def serve_http(handle, *, port=8080, address='127.0.0.1', start:Optional[str]=''):\n\tlog_lines = {\n\t\tcode: \"<-- %d %s\"%(code, str(reason, 'UTF-8', errors='replace'))\n\t\tfor code, reason in Response.REASON.items()\n\t}\n\tdef reply(response:Response):\n\t\ttry: client.sendall(response.content)\n\t\texcept: log.exception(\"Failed to send.\")\n\t\telse: log.info(log_lines[response.code])\n\t\n\tserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tserver.bind((address, port))\n\tserver.listen(1)\n\tif start is not None:\n\t\tos.startfile('http://%s:%d/%s'%(address, port, start.lstrip('/')))\n\tlog.info(\"Listening...\")\n\talive = True\n\twhile alive:\n\t\t(client, address) = server.accept()\n\t\tlog.info(\"Accepted...\")\n\t\ttry: request = Request.from_reader(ClientReader(client))\n\t\texcept socket.timeout: log.info(\"Timed out.\") # No reply; just hang up and move on.\n\t\texcept ProtocolError:\n\t\t\tlog.warning(\"Protocol Error\")\n\t\t\treply(Response.generic(code=400))\n\t\telse:\n\t\t\ttry:\n\t\t\t\tresponse = handle(request)\n\t\t\t\tif not isinstance(response, Response): response = Response(response)\n\t\t\t\talive = not response.shut_down\n\t\t\texcept:\n\t\t\t\tlog.exception(\"During %s %s\", request.command, request.uri)\n\t\t\t\tresponse = Response.from_exception(request)\n\t\t\treply(response)\n\t\tclient.shutdown(socket.SHUT_RDWR)\n\tlog.info(\"Shutting Down.\")", "def _send_response(self, content, code=200):\n if content:\n self._send_head(content, code)\n if not self._header_only:\n try:\n self.wfile.write(content)\n except socket.error:\n # clients like to stop reading after they got a 404\n pass\n else:\n self._send_internal_server_error()", "def do_HANDLE_SUCCESSFUL_REQUEST(self, statusResponse: str = 
\"Success.\"):\n\n self.send_response(200)\n self.end_headers()\n self.do_SEND_SIMPLE_RESPONSE('200 - ' + statusResponse)", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def _send_immediate_response(self, success, message=\"\"):\r\n\r\n # Send the response indicating success/failure\r\n response_str = json.dumps(\r\n {'return_code': 0 if success else 1, 'content': message}\r\n )\r\n\r\n if self._is_grade_request():\r\n self.send_response(\r\n 200, content=response_str, headers={'Content-type': 'text/plain'}\r\n )\r\n self.log_message(\"XQueue: sent response {0}\".format(response_str))\r\n\r\n else:\r\n self.send_response(500)", "def respond(self, status, body):\n self.send_response(status)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(body.encode())", "def send_resp(self):\n self.n_send_resp += 1", "def main():\n\n httpd = BaseHTTPServer.HTTPServer(\n ('localhost', int(ADDON.getSetting(\"port\"))),\n StupidHTTPRequestHandler)\n httpd_thread = threading.Thread(target=httpd.serve_forever)\n httpd_thread.start()\n\n monitor = xbmc.Monitor()\n \n while not monitor.abortRequested():\n # Sleep/wait for abort for 10 seconds\n if monitor.waitForAbort(10):\n # Abort was requested while waiting. We should exit\n break\n\n httpd.shutdown()\n httpd.server_close()", "def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n # Send the message to browser\n self.wfile.write(\"Hello from server!\")\n return", "def _serve(path, port):\n print(\"running HTTP server on port %d...\" % port)\n print('use Ctrl-Break to stop webserver')\n os.chdir(path)\n handler = http.server.SimpleHTTPRequestHandler\n server = socketserver.TCPServer(('', port), handler)\n server.serve_forever()", "def do_SEND_SIMPLE_RESPONSE(self, response: str):\n self.wfile.write(response.encode('utf-8'))" ]
[ "0.6593527", "0.6580389", "0.65228635", "0.6458139", "0.6449098", "0.62513536", "0.6202599", "0.6198447", "0.6197758", "0.6183017", "0.6144795", "0.61393744", "0.6138087", "0.6123219", "0.61206925", "0.6105918", "0.61005306", "0.60456544", "0.6036564", "0.60222226", "0.60135484", "0.59973854", "0.5996558", "0.5987994", "0.59681743", "0.59528697", "0.5937254", "0.59135014", "0.5903669", "0.5901916" ]
0.71259165
0
emulate post request with get handler, we don't need the data
def do_POST(self):
    self.do_GET()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, request, *args, **kwargs):\n return self.get(request, *args, **kwargs)", "def get(self):\n self.post()", "def post(self, *args, **kwargs):\n return self.handle_post_request()", "def post(self, request):\n pass", "def get(self):\n self.post()", "def get(self):\n self.post()", "def post(self):", "def _post(self, *args, **kwargs):\n return self._request('post', *args, **kwargs)", "def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)", "def post(self):\n self.get_or_post(method='POST')", "def post():\n pass", "def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)", "def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)", "def http_method_post():\n return 'POST'", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)" ]
[ "0.78131866", "0.7791229", "0.7586552", "0.7522953", "0.7506268", "0.7506268", "0.74753666", "0.7460196", "0.7440879", "0.73348796", "0.7301347", "0.7189343", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7170561", "0.7035279", "0.6980332", "0.6856018", "0.6848982" ]
0.81476486
0
Parse a request (internal). The request should be stored in self.raw_requestline; the results are in self.command, self.path, self.request_version and self.http_request_headers. Return True for success, False for failure; on failure, an error is sent back.
def parse_request(self):
    self.command = None  # set in case of error on the first line
    self.request_version = version = self.default_request_version
    self.close_connection = 1
    requestline = self.raw_requestline
    # hack: quick and dirty fix for doubled request with bad data
    ok = 0
    if requestline.startswith("GET"):
        ok += 1
    if requestline.startswith("POST"):
        ok += 1
    if requestline.startswith("QUIT"):
        ok += 1
    if ok == 0:
        return False
    # hack ends here
    requestline = requestline.rstrip('\r\n')
    self.requestline = requestline
    words = requestline.split()
    if len(words) == 3:
        command, path, version = words
        if version[:5] != 'HTTP/':
            self.send_error(400, "Bad request version (%r)" % version)
            return False
        try:
            base_version_number = version.split('/', 1)[1]
            version_number = base_version_number.split(".")
            # RFC 2145 section 3.1 says there can be only one "." and
            #   - major and minor numbers MUST be treated as
            #     separate integers;
            #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
            #     turn is lower than HTTP/12.3;
            #   - Leading zeros MUST be ignored by recipients.
            if len(version_number) != 2:
                raise ValueError
            version_number = int(version_number[0]), int(version_number[1])
        except (ValueError, IndexError):
            self.send_error(400, "Bad request version (%r)" % version)
            return False
        if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
            self.close_connection = 0
        if version_number >= (2, 0):
            self.send_error(505, "Invalid HTTP Version (%s)" % base_version_number)
            return False
    elif len(words) == 2:
        command, path = words
        self.close_connection = 1
        if command != 'GET':
            self.send_error(400, "Bad HTTP/0.9 request type (%r)" % command)
            return False
    elif not words:
        return False
    else:
        self.send_error(400, "Bad request syntax (%r)" % requestline)
        return False
    self.command, self.path, self.request_version = command, path, version

    # Examine the http_request_headers and look for a Connection directive
    self.headers = self.MessageClass(self.rfile, 0)

    conntype = self.headers.get('Connection', "")
    if conntype.lower() == 'close':
        self.close_connection = 1
    elif conntype.lower() == 'keep-alive' and self.protocol_version >= "HTTP/1.1":
        self.close_connection = 0
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_request(self):\r\n # HTTP/1.1 connections are persistent by default. If a client\r\n # requests a page, then idles (leaves the connection open),\r\n # then rfile.readline() will raise socket.error(\"timed out\").\r\n # Note that it does this based on the value given to settimeout(),\r\n # and doesn't need the client to request or acknowledge the close\r\n # (although your TCP stack might suffer for it: cf Apache's history\r\n # with FIN_WAIT_2).\r\n request_line = self.rfile.readline()\r\n if not request_line:\r\n # Force self.ready = False so the connection will close.\r\n self.ready = False\r\n return\r\n \r\n if request_line == \"\\r\\n\":\r\n # RFC 2616 sec 4.1: \"...if the server is reading the protocol\r\n # stream at the beginning of a message and receives a CRLF\r\n # first, it should ignore the CRLF.\"\r\n # But only ignore one leading line! else we enable a DoS.\r\n request_line = self.rfile.readline()\r\n if not request_line:\r\n self.ready = False\r\n return\r\n \r\n environ = self.environ\r\n \r\n method, path, req_protocol = request_line.strip().split(\" \", 2)\r\n environ[\"REQUEST_METHOD\"] = method\r\n \r\n # path may be an abs_path (including \"http://host.domain.tld\");\r\n scheme, location, path, params, qs, frag = urlparse(path)\r\n \r\n if frag:\r\n self.simple_response(\"400 Bad Request\",\r\n \"Illegal #fragment in Request-URI.\")\r\n return\r\n \r\n if scheme:\r\n environ[\"wsgi.url_scheme\"] = scheme\r\n if params:\r\n path = path + \";\" + params\r\n \r\n environ[\"SCRIPT_NAME\"] = \"\"\r\n \r\n # Unquote the path+params (e.g. \"/this%20path\" -> \"this path\").\r\n # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2\r\n #\r\n # But note that \"...a URI must be separated into its components\r\n # before the escaped characters within those components can be\r\n # safely decoded.\" http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2\r\n atoms = [unquote(x) for x in quoted_slash.split(path)]\r\n path = \"%2F\".join(atoms)\r\n environ[\"PATH_INFO\"] = path\r\n \r\n # Note that, like wsgiref and most other WSGI servers,\r\n # we unquote the path but not the query string.\r\n environ[\"QUERY_STRING\"] = qs\r\n \r\n # Compare request and server HTTP protocol versions, in case our\r\n # server does not support the requested protocol. Limit our output\r\n # to min(req, server). We want the following output:\r\n # request server actual written supported response\r\n # protocol protocol response protocol feature set\r\n # a 1.0 1.0 1.0 1.0\r\n # b 1.0 1.1 1.1 1.0\r\n # c 1.1 1.0 1.0 1.0\r\n # d 1.1 1.1 1.1 1.1\r\n # Notice that, in (b), the response will be \"HTTP/1.1\" even though\r\n # the client only understands 1.0. RFC 2616 10.5.6 says we should\r\n # only return 505 if the _major_ version is different.\r\n rp = int(req_protocol[5]), int(req_protocol[7])\r\n server_protocol = environ[\"ACTUAL_SERVER_PROTOCOL\"]\r\n sp = int(server_protocol[5]), int(server_protocol[7])\r\n if sp[0] != rp[0]:\r\n self.simple_response(\"505 HTTP Version Not Supported\")\r\n return\r\n # Bah. 
\"SERVER_PROTOCOL\" is actually the REQUEST protocol.\r\n environ[\"SERVER_PROTOCOL\"] = req_protocol\r\n self.response_protocol = \"HTTP/%s.%s\" % min(rp, sp)\r\n \r\n # If the Request-URI was an absoluteURI, use its location atom.\r\n if location:\r\n environ[\"SERVER_NAME\"] = location\r\n \r\n # then all the http headers\r\n try:\r\n self.read_headers()\r\n except ValueError, ex:\r\n self.simple_response(\"400 Bad Request\", repr(ex.args))\r\n return\r\n \r\n creds = environ.get(\"HTTP_AUTHORIZATION\", \"\").split(\" \", 1)\r\n environ[\"AUTH_TYPE\"] = creds[0]\r\n if creds[0].lower() == 'basic':\r\n user, pw = base64.decodestring(creds[1]).split(\":\", 1)\r\n environ[\"REMOTE_USER\"] = user\r\n \r\n # Persistent connection support\r\n if self.response_protocol == \"HTTP/1.1\":\r\n if environ.get(\"HTTP_CONNECTION\", \"\") == \"close\":\r\n self.close_connection = True\r\n else:\r\n # HTTP/1.0\r\n if environ.get(\"HTTP_CONNECTION\", \"\") != \"Keep-Alive\":\r\n self.close_connection = True\r\n \r\n # Transfer-Encoding support\r\n te = None\r\n if self.response_protocol == \"HTTP/1.1\":\r\n te = environ.get(\"HTTP_TRANSFER_ENCODING\")\r\n if te:\r\n te = [x.strip().lower() for x in te.split(\",\") if x.strip()]\r\n \r\n read_chunked = False\r\n \r\n if te:\r\n for enc in te:\r\n if enc == \"chunked\":\r\n read_chunked = True\r\n else:\r\n # Note that, even if we see \"chunked\", we must reject\r\n # if there is an extension we don't recognize.\r\n self.simple_response(\"501 Unimplemented\")\r\n self.close_connection = True\r\n return\r\n \r\n if read_chunked:\r\n if not self.decode_chunked():\r\n return\r\n \r\n # From PEP 333:\r\n # \"Servers and gateways that implement HTTP 1.1 must provide\r\n # transparent support for HTTP 1.1's \"expect/continue\" mechanism.\r\n # This may be done in any of several ways:\r\n # 1. Respond to requests containing an Expect: 100-continue request\r\n # with an immediate \"100 Continue\" response, and proceed normally.\r\n # 2. Proceed with the request normally, but provide the application\r\n # with a wsgi.input stream that will send the \"100 Continue\"\r\n # response if/when the application first attempts to read from\r\n # the input stream. The read request must then remain blocked\r\n # until the client responds.\r\n # 3. Wait until the client decides that the server does not support\r\n # expect/continue, and sends the request body on its own.\r\n # (This is suboptimal, and is not recommended.)\r\n #\r\n # We used to do 3, but are now doing 1. 
Maybe we'll do 2 someday,\r\n # but it seems like it would be a big slowdown for such a rare case.\r\n if environ.get(\"HTTP_EXPECT\", \"\") == \"100-continue\":\r\n self.simple_response(100)\r\n \r\n self.ready = True", "def process(self, raw: bytes) -> Tuple[bool, bytes]:\n line, raw = find_http_line(raw)\n if line is None:\n return False, raw\n\n if self.state == httpParserStates.INITIALIZED:\n self.process_line(line)\n self.state = httpParserStates.LINE_RCVD\n elif self.state in (httpParserStates.LINE_RCVD, httpParserStates.RCVING_HEADERS):\n if self.state == httpParserStates.LINE_RCVD:\n # LINE_RCVD state is equivalent to RCVING_HEADERS\n self.state = httpParserStates.RCVING_HEADERS\n if line.strip() == b'': # Blank line received.\n self.state = httpParserStates.HEADERS_COMPLETE\n else:\n self.process_header(line)\n\n # When connect request is received without a following host header\n # See\n # `TestHttpParser.test_connect_request_without_host_header_request_parse`\n # for details\n if self.state == httpParserStates.LINE_RCVD and \\\n self.type == httpParserTypes.RESPONSE_PARSER and \\\n raw == CRLF:\n self.state = httpParserStates.COMPLETE\n # When raw request has ended with \\r\\n\\r\\n and no more http headers are expected\n # See `TestHttpParser.test_request_parse_without_content_length` and\n # `TestHttpParser.test_response_parse_without_content_length` for details\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == httpParserTypes.REQUEST_PARSER and \\\n self.method != httpMethods.POST and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == httpParserTypes.REQUEST_PARSER and \\\n self.method == httpMethods.POST and \\\n (b'content-length' not in self.headers or\n (b'content-length' in self.headers and\n int(self.headers[b'content-length'][1]) == 0)) and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n\n return len(raw) > 0, raw", "def parse_request(first_line):\n command = None # set in case of error on the first line\n request_version = version = default_request_version\n close_connection = 1\n path = \"\"\n requestline = first_line.rstrip('\\r\\n')\n words = requestline.split()\n if len(words) == 3:\n command, path, version = words\n if version[:5] != 'HTTP/':\n easyHandler.send_error(400, \"Bad request version (%r)\" % version)\n return False\n try:\n base_version_number = version.split('/', 1)[1]\n version_number = base_version_number.split(\".\")\n # RFC 2145 section 3.1 says there can be only one \".\" and\n # - major and minor numbers MUST be treated as\n # separate integers;\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\n # turn is lower than HTTP/12.3;\n # - Leading zeros MUST be ignored by recipients.\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError):\n easyHandler.send_error(400, \"Bad request version (%r)\" % version)\n return False\n if version_number >= (1, 1) and protocol_version >= \"HTTP/1.1\":\n close_connection = 0\n if version_number >= (2, 0):\n easyHandler.send_error(505,\n \"Invalid HTTP Version (%s)\" % base_version_number)\n return False\n elif len(words) == 2:\n command, path = words\n close_connection = 1\n if command != 'GET':\n easyHandler.send_error(400, \"Bad HTTP/0.9 request type (%r)\" % command)\n return False\n elif not words:\n return False\n else:\n easyHandler.send_error(400, 
\"Bad request syntax (%r)\" % requestline)\n return easyRequest(command, path, version)", "def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result", "async def read_request_line(self):\n while True:\n rl = await self.reader.readline()\n # skip empty lines\n if rl == b'\\r\\n' or rl == b'\\n':\n continue\n break\n rl_frags = rl.split()\n if len(rl_frags) != 3:\n raise HTTPException(400)\n self.method = rl_frags[0]\n url_frags = rl_frags[1].split(b'?', 1)\n self.path = url_frags[0]\n if len(url_frags) > 1:\n self.query_string = url_frags[1]", "def handle_request_complete(self) -> bool:\n try:\n request_raw = self.read_socket(self.conn, self.request_chunk_size, self.request_max_size)\n logging.debug('%s -> %s', self.conn_addr, request_raw[:100])\n # If conn.recv() returns an empty bytes object, b'', then the client closed the connection and the loop is terminated.\n if not request_raw:\n logging.info('%s - closed by client.', self.conn_addr)\n return True\n self.request = HTTPRequest.parse_request(request_raw, self.docs_root, self.generate_index, self.conn_timeout)\n logging.info('%s -> %s %s %s', self.conn_addr, self.request.protocol, self.request.command, self.request.rpath)\n do_command = 'do_' + self.request.command.lower()\n if hasattr(self, do_command):\n return getattr(self, do_command)() # todo: add support for Expect directive https://developer.mozilla.org/ru/docs/Web/HTTP/Status/100\n else:\n return self.send_error(HTTPStatus.METHOD_NOT_ALLOWED, f'Method {self.request.command} not allowed.', [('Allow', 'GET, HEAD')])\n except HTTPException as exc:\n return self.send_error(exc.status, exc.details)\n except socket.timeout:\n return self.send_error(HTTPStatus.REQUEST_TIMEOUT)\n except BaseException as exc: # pylint: disable=broad-except\n return self.send_error(HTTPStatus.INTERNAL_SERVER_ERROR, repr(exc))\n return True", "def _parse_request_line(line):\n # Up to the first space is the method.\n index0 = line.index(SPACE)\n method = line[: index0]\n # Starting from the first space, up to the next space is the path.\n index1 = line.index(SPACE, index0 + 1)\n path = line[index0 + 1 : index1]\n # The remainder is the protocol version.\n http_version = line[index1 + 1 :]\n # Make sure it's the protocol version we recognize.\n assert http_version == HTTP_VERSION\n return method, path", "def parse_request(self, request):\n response=''\n http_code = 200\n\n request_line = request.splitlines()[0]\n request_method, path, request_version = request_line.split()\n\n #Try to split path into it's components: the operation requested and the keyvalue\n try:\n request_op, request_keyvalue = path.split('?')\n request_op = request_op[1:]\n\n #If request is a get we split in a different order than if it's a set\n if request_op == 'get':\n request_value, request_key = request_keyvalue.split('=')\n response, http_code = self.get_value(request_key)\n elif request_op == 'set':\n request_key, request_value = request_keyvalue.split('=')\n response, http_code = self.set_value(request_key, request_value)\n else:\n response = 'Unknown operation in URL. 
Must be either GET or SET.'\n http_code = 400\n\n except ValueError: #Catch any paths that don't match the form we're interested in\n response = dedent(\"\"\"Incorrect path (%s)\n Requested URL must take the form http://%s:%s/[operation]?[value]\"\"\" % (path, self.server_address, self.server_port))\n http_code = 400\n return response, http_code\n\n return response, http_code", "def validate_http_request(request):\r\n if request != b'':\r\n # Divide the request line: [method, sp, url, version, cr lf]\r\n request = request.decode().split('\\r')[0]\r\n method = request.split()[0]\r\n url = request.split()[1]\r\n version = request.split()[2]\r\n if method == METHOD and version == VERSION:\r\n return True, url\r\n else:\r\n return False, None\r\n else:\r\n return True, None", "def process_request(self, request):\n # if the site is disabled, then it's like it's invisible, so it's not a match for this site\n if (not self.get_isenabled()):\n return False\n\n # if the request does not match site prefix, then it's not a match for this site\n if (not self.does_request_match_siteprefix(request)):\n return False;\n\n # ok, looks like it was meant for us\n\n # before we start a request we might have stuff to do\n self.process_request_starts(request)\n\n # log it\n #self.logevent(EInfo(\"Request URL: {0} from {1}.\".format(request.get_fullurlpath_original(), request.get_remote_addr())),request=request)\n self.logevent(EInfo(\"Request URL: {0} from {1}.\".format(request.get_fullurlpath_original(), request.get_remote_addr())))\n\n # handle the request\n ishandled = self.comp('routemanager').process_request(self, request)\n\n # after we end a request we might have stuff to do (this might include, for example, flushing the database)\n self.process_request_ends(request, ishandled)\n\n # return whether we handled it\n return ishandled", "def parse_request(cls, request_raw: bytes, docs_root: Path, generate_index: bool = False, timeout: float = None) -> typing.NamedTuple:\n if not request_raw:\n raise HTTPException(HTTPStatus.BAD_REQUEST, 'Empty request')\n\n lines = request_raw.decode(HTTP_DEFAULT_ENCODING).rstrip(END_LINE).split(END_LINE)\n\n request_line_parts = lines[0].split()\n if len(request_line_parts) not in [2, 3]:\n raise HTTPException(HTTPStatus.BAD_REQUEST, f'Invalid request line format: {lines[0]}')\n\n command, uri = request_line_parts[:2]\n\n if len(request_line_parts) == 2 and command != 'GET':\n raise HTTPException(HTTPStatus.BAD_REQUEST, f'Bad HTTP/0.9 request type: {command}')\n\n protocol = Protocol(DEFAULT_REQUEST_VERSION if len(request_line_parts) == 2 else request_line_parts[-1])\n\n rpath = unquote(urlsplit(uri).path).lstrip('/')\n logging.debug('%s wanted.', rpath)\n if any(part in rpath.split('/') for part in ['~', '.', '..']):\n raise HTTPException(HTTPStatus.FORBIDDEN, f'Forbidden path format: {rpath}')\n abs_path = docs_root.joinpath(rpath)\n if abs_path.is_file() and rpath.endswith('/'):\n raise HTTPException(HTTPStatus.NOT_FOUND)\n if abs_path.is_dir() and not generate_index:\n abs_path = abs_path.joinpath(INDEX_FILE_NAME)\n if not abs_path.exists():\n raise HTTPException(HTTPStatus.NOT_FOUND)\n\n headers = OrderedDict(list((key.title(), value.strip().lower()) for key, _, value in (line.partition(':') for line in lines[1:] if line)))\n return cls(\n protocol=protocol,\n command=command,\n rpath=rpath,\n abs_path=abs_path,\n docs_root=docs_root,\n headers=headers,\n timeout=timeout,\n ctime=time.time(),\n body=None)", "def handle_one_request(self):\n import socket\n try:\n 
self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n if not self.raw_requestline:\n self.close_connection = 1\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n ##### Customization\n # origin\n \"\"\"\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n \"\"\"\n # now\n #import pdb; pdb.set_trace()\n self.delegate(self.get_environ(), self.gen_response, self.send_error)\n\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout, e:\n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "def is_valid_http_request(split_request_header: list) -> bool:\n if len(split_request_header) < 3:\n return False\n if split_request_header[0] not in HttpServer.REQUESTS:\n return False\n if split_request_header[2] != \"HTTP/1.1\":\n return False\n if split_request_header[0] == \"PUT\" or split_request_header[0] == \"POST\":\n # see if there aren't any directories preceding the file\n occurrences = re.findall(\"/\", split_request_header[1])\n if len(occurrences) > 1:\n return False\n\n return True\n\n # return len(split_request_header) < 3 or \\\n # split_request_header[0] not in HttpServer.REQUESTS or \\\n # split_request_header[2] != \"HTTP/1.1\"", "def parse_request(self, request):\n request.process_inputs()", "def handle(self):\n\t\ttry:\n\t\t\trequest_line = self.rfile.readline().decode(\"ascii\")\n\t\t\tassert request_line.endswith(\"\\r\\n\"), \"Request line must end in CRLF\"\n\t\t\tparts = request_line.strip().split()\n\t\t\tassert len(parts)==3, \"Invalid request line\"\n\t\t\thost, path, content_length = parts\n\t\t\tif (content_length:=int(content_length))>0:\n\t\t\t\tdata = self.rfile.read(content_length)\n\t\t\telse:\n\t\t\t\tdata = b''\n\t\t\tself.handle_request(host,path,data)\n\t\texcept AssertionError as e:\n\t\t\tself.response_code(4,e.args[0])", "def _process_request(self, request):\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)", "def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)", "def parse_request(request: bytes) -> Tuple[RequestLineHeader, str]:\r\n\r\n request = request.decode('ascii')\r\n 
print(request)\r\n split_request = request.split('\\r\\n')\r\n method, path, http_version = split_request[0].split(' ')\r\n path = ROOT_DIR + ('index.html' if path == '/' else path[1:])\r\n args = split_request[-1] if method == 'POST' else ''\r\n\r\n return RequestLineHeader(method, path, http_version), args", "def parse_http_request(source_addr, http_raw_data):\n r1 = http_raw_data.split('\\n')[0]\n method = r1.split()[0]\n path = r1.split()[1]\n if path == \"/\":\n r2 = http_raw_data.split('\\n')[1]\n host = r2.split()[0]\n if host == \"Host:\":\n host = re.sub(\"[:]\", \"\", host)\n r3 = r2.split(':')\n url = r2.split()[1]\n headers = []\n r3 = ' '.join(r3).replace('\\r', '').split()\n headers.append(r3)\n headers.append(url)\n headers\n requested_host = headers[0:]\n requested_path = path\n portno = re.findall(r'[0-9]+', r2)\n if portno == []:\n portno = \"80\"\n requested_port = portno\n requested_host = url\n print(\"*\" * 50)\n print(\"[parse_http_request] Implement me!\")\n print(\"*\" * 50)\n # Replace this line with the correct values.\n request_info = HttpRequestInfo(source_addr, method, requested_host, requested_port, requested_path, headers)\n return request_info", "async def parse_handle_request(self, json_command):\n try:\n # Check signature\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n\n message = await other_key.verify_message(json_command)\n request = json.loads(message)\n\n # Parse the request whoever necessary.\n request = CommandRequestObject.from_json_data_dict(\n request, JSONFlag.NET\n )\n\n # Going ahead to process the request.\n logger.debug(\n f'(other:{self.other_address_str}) '\n f'Processing request seq #{request.cid}',\n )\n response = self.handle_request(request)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. 
OffChainInvalidSignature: {e}'\n )\n response = make_parsing_error(f'{e}', code=OffChainErrorCode.invalid_signature)\n\n except JSONParsingError as e:\n logger.error(\n f'(other:{self.other_address_str}) JSONParsingError: {e}',\n exc_info=True,\n )\n response = make_parsing_error()\n except Exception as e:\n logger.error(\n f'(other:{self.other_address_str}) exception: {e}',\n exc_info=True,\n )\n raise e\n\n # Prepare the response.\n full_response = await self.package_response(response)\n return full_response", "def processGithubRequest(self):\n self.event = self.headers.getheader('X-Github-Event')\n LOGGER.info(\"Recieved event %s\", self.event)\n\n if self.event == 'ping':\n LOGGER.info('Ping event received')\n self.respond(204)\n return False\n if self.event != 'push':\n LOGGER.error('We only handle ping and push events')\n self.respond(304)\n return False\n\n self.respond(204)\n\n length = int(self.headers.getheader('content-length'))\n body = self.rfile.read(length)\n payload = json.loads(body)\n self.branch = payload['ref']\n self.urls = [payload['repository']['url']]\n return True", "def dispatch_or_exit(self, request):\n requestType = request.get(\"request\")\n if requestType == \"STATUS\":\n status = self.process.get_status()\n self.send_status(status)\n elif (requestType == \"START\"):\n self.start_command(request.get(\"payload\", dict()))\n elif requestType == \"STOP\" and not self.process.shutdown_in_progress():\n return self.stop_command(request.get(\"payload\", dict()).get(\"timeout\"))\n else:\n logging.warn(\"unexpected request\")\n return False", "def found_terminator (self):\r\n if self.current_request:\r\n self.current_request.found_terminator()\r\n else:\r\n header = self.in_buffer\r\n self.in_buffer = ''\r\n lines = header.split('\\r\\n')\r\n\r\n # --------------------------------------------------\r\n # crack the request header\r\n # --------------------------------------------------\r\n\r\n while lines and not lines[0]:\r\n # as per the suggestion of http-1.1 section 4.1, (and\r\n # Eric Parker <[email protected]>), ignore a leading\r\n # blank lines (buggy browsers tack it onto the end of\r\n # POST requests)\r\n lines = lines[1:]\r\n\r\n if not lines:\r\n self.close_when_done()\r\n return\r\n\r\n request = lines[0]\r\n\r\n command, uri, version = http_server.crack_request (request)\r\n header = http_server.join_headers (lines[1:])\r\n\r\n # unquote path if necessary (thanks to Skip Montanaro for pointing\r\n # out that we must unquote in piecemeal fashion).\r\n rpath, rquery = http_server.splitquery(uri)\r\n if '%' in rpath:\r\n if rquery:\r\n uri = http_server.unquote (rpath) + '?' 
+ rquery\r\n else:\r\n uri = http_server.unquote (rpath)\r\n\r\n r = deferring_http_request (self, request, command, uri, version,\r\n header)\r\n self.request_counter.increment()\r\n self.server.total_requests.increment()\r\n\r\n if command is None:\r\n self.log_info ('Bad HTTP request: %s' % repr(request), 'error')\r\n r.error (400)\r\n return\r\n\r\n # --------------------------------------------------\r\n # handler selection and dispatch\r\n # --------------------------------------------------\r\n for h in self.server.handlers:\r\n if h.match (r):\r\n try:\r\n self.current_request = r\r\n # This isn't used anywhere.\r\n # r.handler = h # CYCLE\r\n h.handle_request (r)\r\n except:\r\n self.server.exceptions.increment()\r\n (file, fun, line), t, v, tbinfo = \\\r\n asyncore.compact_traceback()\r\n self.server.log_info(\r\n 'Server Error: %s, %s: file: %s line: %s' %\r\n (t,v,file,line),\r\n 'error')\r\n try:\r\n r.error (500)\r\n except:\r\n pass\r\n return\r\n\r\n # no handlers, so complain\r\n r.error (404)", "def __call__(self, request, parser):\r\n logger.info('requesting %r', request)\r\n\r\n uri = request.uri\r\n data = None\r\n\r\n if request.method.upper() in http.URLENCODE_METHODS:\r\n uri = encode_uri(request)\r\n else:\r\n data = encode_data(request)\r\n\r\n logger.debug('request uri: %r, data: %r, headers: %r',\r\n uri, data, request.headers)\r\n\r\n req = RequestWithMethod(uri, data, request.headers)\r\n req.set_method(request.method)\r\n\r\n opener = port.urllib_request.build_opener(*self.handlers)\r\n resp = opener.open(req)\r\n\r\n body = resp.read()\r\n headers = dict(resp.info())\r\n logger.debug('response code: %r, body: %r, headers: %r',\r\n resp.code, body, headers)\r\n\r\n return parser(body, resp.code, headers)", "def __receive_request(self):\n # get the request's length\n request_size = self.__socket.recv(Commands.SIZE_LENGTH)\n # if the master sent an empty msg, then he has closed himself\n if not request_size:\n print \"Master Has Been Closed\"\n # TODO: close the peasant and start the run function all over again\n return False\n # fix the request's length\n request_size = int(request_size) - Commands.COMMAND_LENGTH\n # get the request's command's number\n command = int(Commands.decrypt(self.__socket.recv(Commands.COMMAND_LENGTH)))\n # if the request size's is 0, then there are not args\n args = []\n # else, there are args, read them (decrypted)\n if request_size != 0:\n args = Commands.decrypt(self.__socket.recv(request_size)).split(Commands.SEPARATE_CHAR)\n if self.__DEBUG:\n print args\n # handle the command and add the command number and return value to the responses list\n self.__responses.append(str(command) + Commands.handle_command_request(command, args))\n return True", "def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n 
self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF", "def do_POST(self): # pylint:disable=invalid-name\n if not self.is_log_path_valid():\n self.report_404()\n return\n\n try:\n # Get arguments by reading body of request.\n # We read this in chunks to avoid straining\n # socket.read(); around the 10 or 15Mb mark, some platforms\n # begin to have problems (bug #792570).\n max_chunk_size = 10 * 1024 * 1024\n size_remaining = int(self.headers[\"content-length\"])\n chunk_list = []\n while size_remaining:\n chunk_size = min(size_remaining, max_chunk_size)\n chunk = self.rfile.read(chunk_size)\n if not chunk:\n break\n chunk_list.append(chunk)\n size_remaining -= len(chunk_list[-1])\n data = ''.join(chunk_list)\n\n data = self.decode_request_content(StringIO(data))\n if data is None:\n return # response has been sent\n\n if 'data' in data.keys() and 'method' in data['data'].keys():\n response = self.server.dispatch(data['data']['method'], data)\n else:\n response = None\n\n except Exception: # This should only happen if the module is buggy\n # internal error, report as HTTP server error\n self.send_response(500)\n raise\n else:\n # got a valid LOG response\n self.send_200_resp(response, \"text/plain\")", "def route_request(self,):\n # Let's parse and prepare url, path, query etc..\n up = urlparse(self.path, 'http')\n self.log_data['url'] = path = up.path\n splitpath = path.split('/')\n if len(splitpath) == 1:\n raise HTTPError(404, 'Not found.')\n self.query = parse_qs(up.query)\n\n # Get the route\n route = self.get_route(self.http_method, path.encode('utf-8'))\n # Parse URL path\n urlvars = self.parse_path(path.encode('utf-8'), route)\n post_raw = None\n # Load POST content if any\n if self.http_method == 'POST':\n # TODO: raise an HTTP error if the content-length is\n # too large.\n try:\n post_raw = self.rfile.read(int(self.headers['Content-Length']))\n except Exception as e:\n logger.exception(str(e))\n logger.debug(self.headers)\n logger.error('Unable to read post data')\n raise HTTPError(400, 'Unable to read post data')\n\n username = None\n checked = False\n\n # Authentication checking out\n\n # 1. Try the auth' by key: if this method is available for this API\n # and 'key' arg exists.\n key = self.headers.get('X-TemBoard-Agent-Key')\n if key:\n logger.debug(\"Authentication by key from header.\")\n elif 'key' in self.query:\n # TODO: Remove auth from query in 8.0\n key = self.query['key'][0]\n logger.debug(\"Authentication by key from argument.\")\n\n if route['check_key'] and key:\n if self.app.config.temboard.key is None:\n raise HTTPError(401, \"Authentication key not configured\")\n if key != self.app.config.temboard.key:\n raise HTTPError(401, \"Invalid key\")\n checked = True\n\n # 2. 
Check session ID if available and not previously auth'd by key\n if not checked and route['check_session']:\n username = check_sessionid(self.headers, self.sessions)\n checked = True\n\n # 3. At this point, if not yet checked out and auth' by key is\n # available then we need to raise an error because 'key' arg hasn't\n # been passed and auth' by key is the only available method.\n if not checked and route['check_key']:\n raise HTTPError(401, \"Missing key\")\n\n try:\n # Load POST content expecting it is in JSON format.\n if self.http_method == 'POST':\n self.post_json = json.loads(post_raw.decode('utf-8'))\n except Exception as e:\n logger.exception(str(e))\n logger.error('Invalid json format')\n raise HTTPError(400, 'Invalid json format')\n\n http_context = dict(\n headers=self.headers,\n query=self.query,\n post=self.post_json,\n urlvars=urlvars,\n username=username,\n )\n\n # Handle the request\n func = getattr(sys.modules[route['module']], route['function'])\n self.log_data['handler'] = route['module'] + '.' + route['function']\n if route['module'] == 'temboardagent.api':\n # some core APIs need to deal with sessions\n return (200, func(http_context, self.app, self.sessions))\n else:\n # plugin\n return (200, func(http_context, self.app))", "def _parse(self, line):\n comd, value = cmd.parse(line, CLIENT_PREFIX)\n if comd == 'reg':\n self.peername = value\n elif comd == 'eof':\n self._reset()\n msg = cmd.clientcmd(comd, value)\n msg = cmd.addFirst(msg, self.peername)\n self.sendString(msg)\n elif comd == 'fail':\n self._reset()\n else:\n return False\n return True", "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)" ]
[ "0.6622271", "0.64777374", "0.63694507", "0.60598505", "0.59718955", "0.590743", "0.57738644", "0.5756934", "0.5742927", "0.555604", "0.55017346", "0.5494741", "0.54922587", "0.5484244", "0.5446223", "0.54096335", "0.53933066", "0.5307045", "0.5289728", "0.52803946", "0.526739", "0.5251216", "0.5237434", "0.5194454", "0.5134923", "0.5129967", "0.5105918", "0.5102994", "0.50999296", "0.5066618" ]
0.7992387
0
Handle one request at a time until stopped.
def serve_forever(self, unused_parameter=0.5):
    self.stop = False
    while not self.stop:
        self.handle_request()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while self.running:\n self.handle_request()", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def process_request_thread(self):\n while True:\n try:\n request, client_address = self._request_queue.get(\n timeout=self.timeout_on_get,\n )\n except Queue.Empty:\n # You wouldn't believe how much crap this can end up leaking,\n # so we clear the exception.\n sys.exc_clear()\n if self._shutdown_event.isSet():\n return\n continue\n try:\n self.finish_request(request, client_address)\n self.shutdown_request(request)\n except:\n self.handle_error(request, client_address)\n self.shutdown_request(request)\n self._request_queue.task_done()", "def handle_request(self, request):\n return self._first_handler(self, request)", "def serve(self):\n self._stop = False\n self.prepare()\n while not self._stop:\n self.handle()", "def handle_one_request(self):\n import socket\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n if not self.raw_requestline:\n self.close_connection = 1\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n ##### Customization\n # origin\n \"\"\"\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n \"\"\"\n # now\n #import pdb; pdb.set_trace()\n self.delegate(self.get_environ(), self.gen_response, self.send_error)\n\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout, e:\n #a read or a write timed out. 
Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "def _handle_requests(self):\n for request in self._requests[:]:\n self.logger.debug(\"Handling request: %r\", request)\n\n # an orphan request, client is not alive.\n if not request.server_request and not request.worker.is_alive:\n self.logger.warning(\"Client %r disconnected, request dropped\",\n request.worker.name)\n self._requests.remove(request)\n continue\n\n try:\n request_handler = self._get_request_handler(request)\n reply = request_handler(request)\n\n except _WaitingForResourceException as ex:\n self.logger.exception(str(ex))\n continue\n\n except Exception as ex:\n if isinstance(ex, ServerError):\n code = ex.ERROR_CODE\n content = ex.get_error_content()\n\n else:\n code = ServerError.ERROR_CODE\n content = str(ex)\n\n self.logger.exception(str(ex))\n reply = ErrorReply(code=code, content=content)\n\n reply.request_id = request.message.msg_id\n self._reactor.callFromThread(request.respond, reply)\n\n self._requests.remove(request)", "def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()", "def serve_requests(self):\n while True:\n self.server_socket.listen(self.request_queue_size)\n client_connection, client_address = self.server_socket.accept()\n self.request_handler(client_connection)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def handle_request(self):\n \n # Clear request immediately.\n request = self.request\n self.request = None\n self.save()\n \n self.log.info(\"Request received: %s\" % Request.name(request))\n \n if request == Request.PAUSE:\n self.set_status(Status.PAUSED)\n \n elif request == Request.RESUME:\n if self.status != Status.PAUSED:\n self.log.info(\"Must be paused to resume; clearing request.\")\n else:\n self.set_status(Status.RUNNING)\n \n elif request == Request.STOP:\n self.set_status(Status.ENDED)\n \n elif request == Request.KILL:\n self.set_status(Status.KILLED)\n \n elif request == Request.RELOAD:\n changed = MultiQuerySet(Schedule, CronSchedule)\n changed = changed.objects.unfinished.filter(\n changed=True, scheduler=self)\n for item in self.timer.tasks:\n s = item[2][0]\n if s in changed:\n self.log.info(\"Removing outdated: %s\" % s)\n self.timer.tasks.remove(item)\n self.set.remove(s)\n s = type(s).objects.get(pk=s.pk)\n for s in changed:\n self.log.info(\"Adding updated: %s\" % s)\n self.add(s)\n changed.update(changed=False)", "def _HandleFlowProcessingRequestLoop(self, handler):\n while not self.flow_handler_stop:\n with self.lock:\n todo = self._GetFlowRequestsReadyForProcessing()\n for request in todo:\n self.flow_handler_num_being_processed += 1\n del self.flow_processing_requests[(request.client_id,\n request.flow_id)]\n\n for request in todo:\n handler(request)\n with self.lock:\n self.flow_handler_num_being_processed -= 1\n\n time.sleep(0.2)", "def handle_one_request(self):\n \n try:\n \n self.raw_requestline = self.rfile.readline(65537)\n \n if len(self.raw_requestline) > 65536:\n \n self.requestline = ''\n \n self.request_version = ''\n \n self.command = ''\n \n self.send_error(414)\n \n return\n \n if not self.raw_requestline:\n \n self.close_connection = 1\n \n return\n \n if not self.parse_request():\n \n # An 
error code has been sent, just exit\n \n return\n \n mname = 'do_' + self.command\n \n if not hasattr(self, mname):\n \n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n \n return\n \n method = getattr(self, mname)\n \n print \"before call do_Get\"\n \n method()\n \n #增加 debug info 及 wfile 判断是否已经 close\n \n print \"after call do_Get\"\n \n if not self.wfile.closed:\n self.wfile.flush() #actually send the response if not already done.\n \n print \"after wfile.flush()\"\n \n except socket.timeout, e:\n \n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "def _listen_to_requests(self):\n while True:\n try:\n request = self._client.recv(1024)\n except socket.error as err:\n if DEBUG_LEVEL >= 1:\n print \"Got socket error: {}\".format(err.message)\n self._client.close()\n return True\n\n if not request:\n if DEBUG_LEVEL >= 0:\n print \"Closing connection\"\n self._client.close()\n return True\n\n if DEBUG_LEVEL >= 2:\n print request\n\n if not HTTPValidation.validate_request(request):\n if DEBUG_LEVEL >= 0:\n print \"Invalid request, closing...\"\n self._client.send(public_response_functions.get_error_response())\n self._client.close()\n return True\n\n if not self._send_response(request):\n if DEBUG_LEVEL >= 0:\n print \"Closing connection...\"\n self._client.close()\n return", "def _on_response(self):\n request = self._requests.pop(0)\n try:\n request[-1].cancel()\n left = request[-1].end - Engine.instance().time\n except Exception:\n left = request[5]\n pass\n\n response = self.current_response\n\n close_after = response.headers.get('Connection', '') == 'close'\n close_after &= self.keep_alive\n\n # Is this a 100 Continue?\n if response.status == 100:\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Did we catch a redirect?\n if response.status in (301,302) and request[9] <= self.max_redirects:\n # Generate a new request, using the new URL.\n new_url = urlparse.urljoin(response.full_url,\n response.headers['Location'])\n\n new_headers = request[3].copy()\n del new_headers['Host']\n\n new_req = self._add_request(request[0], new_url, new_headers,\n request[4], left, False)\n new_req[6] = request[6]\n new_req[7] = request[7]\n new_req[9] = request[9] + 1\n\n new_req.append(\n Engine.instance().defer(left, self._request_timeout, new_req))\n\n self._requests.insert(0, new_req)\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Try converting to unicode?\n if self.unicode:\n content_type = response.headers.get('Content-Type','')\n if 'charset=' in content_type:\n content_type, _, encoding = content_type.partition('charset=')\n try:\n response.body = response.body.decode(encoding)\n except (LookupError, UnicodeDecodeError):\n pass\n\n # Determine the handler function to use.\n if callable(request[6]):\n func = request[6]\n else:\n func = self.on_response\n\n # Call the handler function.\n try:\n func(0, response)\n except Exception:\n log.exception('Error in HTTP response handler.')\n\n # Process the next request.\n self.current_response = None\n\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()", "def _handle_first_request(self):\n pass", "def run(self):\n while self._num_workers 
> 0:\n self.server.handle_request()\n self._graph = None", "def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)", "def do_GET(self):\n global st_point, cur_request\n if time.time() - st_point < 1 and cur_request > args.MAX_REQ:\n self.send_response(429)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n time.sleep(0.2)\n return\n elif time.time() - st_point > 1:\n st_point = time.time()\n cur_request = 1\n self.func_PARSE()\n if self.parsed_url[2] in [\"/ping\", \"/cats\"]:\n self.func_DO()\n else:\n self.send_response(400)\n text=\"<h1 align=center>Bad request</h1>\"\n self.func_PRINT(text)", "def process(self):\n if not self._requests:\n return\n\n self._processing = True\n Engine.instance().start()", "def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)", "def _worker(self):\n while True:\n request = self.queue.get()\n self.worker(request)\n self.queue.task_done()", "async def handle_request(self, request: aioweb.request.Request):", "def process_request_thread(self, request, client_address):\n # pylint: disable=broad-except\n try:\n self.finish_request(request, client_address)\n except Exception:\n self.handle_error(request, client_address)\n 
self.shutdown_request(request)", "def _http_thread_func(self):\r\n while not self._terminating:\r\n # pop queued request from the queue and process it\r\n (api_endpoint, params, reqid) = self.http_requests.get(True)\r\n translated = None\r\n try:\r\n answer = self.http_signed_call(api_endpoint, params)\r\n if answer[\"result\"] == \"success\":\r\n # the following will reformat the answer in such a way\r\n # that we can pass it directly to signal_recv()\r\n # as if it had come directly from the websocket\r\n translated = {\r\n \"op\": \"result\",\r\n \"result\": answer[\"data\"],\r\n \"id\": reqid\r\n }\r\n else:\r\n if \"error\" in answer:\r\n if answer[\"token\"] == \"unknown_error\":\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n else:\r\n\r\n # these are errors like \"Order amount is too low\"\r\n # or \"Order not found\" and the like, we send them\r\n # to signal_recv() as if they had come from the\r\n # streaming API beause Gox() can handle these errors.\r\n translated = {\r\n \"op\": \"remark\",\r\n \"success\": False,\r\n \"message\": answer[\"error\"],\r\n \"token\": answer[\"token\"],\r\n \"id\": reqid\r\n }\r\n\r\n else:\r\n self.debug(\"### unexpected http result:\", answer, reqid)\r\n\r\n except Exception as exc:\r\n # should this ever happen? HTTP 5xx wont trigger this,\r\n # something else must have gone wrong, a totally malformed\r\n # reply or something else.\r\n #\r\n # After some time of testing during times of heavy\r\n # volatility it appears that this happens mostly when\r\n # there is heavy load on their servers. Resubmitting\r\n # the API call will then eventally succeed.\r\n self.debug(\"### exception in _http_thread_func:\",\r\n exc, api_endpoint, params, reqid)\r\n\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n\r\n if translated:\r\n self.signal_recv(self, (json.dumps(translated)))\r\n\r\n self.http_requests.task_done()", "def request() -> None:\n\t_flag.set()" ]
[ "0.7609261", "0.71269566", "0.69555724", "0.6718361", "0.6676004", "0.6669508", "0.6668242", "0.6592521", "0.657124", "0.65122896", "0.65122896", "0.65122896", "0.6483223", "0.6456908", "0.6451984", "0.6424715", "0.6336171", "0.6316808", "0.6310098", "0.63058025", "0.629759", "0.62565595", "0.62523717", "0.6231393", "0.61261195", "0.61199874", "0.6111971", "0.61055845", "0.60707414", "0.60474014" ]
0.7465852
1
Generate machine id based on default adapters mac address.
def _generate_machine_id(self):
    mach_id = "machine_"
    try:
        gws = netifaces.gateways()  # get all gateways
        default = gws['default']  # get the default gw
        adapter = default[2][1]  # get the adapter identifier
        real_adapter = netifaces.ifaddresses(adapter)  # get the adapter
        link_info = real_adapter[netifaces.AF_LINK]
        mac = link_info[0]['addr']
        mac = re.sub('[:]', '', mac)
    except:
        mac = "unsup"
        self.logger.error("Getting mac of internet card is not supported, needs netifaces >= 0.10")
    self.machine_id = mach_id + mac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_mac(topology_id):\n tid = int(topology_id)\n global mac_counter\n global used_macs\n base = '52:54:00:00:00:00'\n ba = base.split(':')\n ba[2] = '%02x' % int(tid / 256)\n ba[3] = '%02x' % int(tid % 256)\n ba[4] = '%02x' % int(len(used_macs[topology_id]) / 256)\n ba[5] = '%02x' % int(mac_counter)\n\n mac_counter += 1\n\n mac_counter = mac_counter % 256\n return ':'.join(ba)", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def random_mac():\n return '\"02:%02x:%02x:%02x:%02x:%02x\"' % (random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255))", "def get_random_mac():\r\n mac = [0x00, 0x16, 0x3e,\r\n random.randint(0x00, 0x7f),\r\n random.randint(0x00, 0xff),\r\n random.randint(0x00, 0xff)]\r\n return ':'.join(map(lambda x: \"%02x\" % x, mac))", "def unique_id(self):\n return self._device.mac", "def get_machine_id(self):\n try:\n self.boardcon.flushInput()\n self._write(chr(self.outgoing_machine_id))\n sleep(0.5)\n machine_id = ord(self._read(1))\n logger.info(\"Mapped /dev/ttyUSB{0} to Mac ID {1}\".format(\n self.dev_id, machine_id\n ))\n return int(machine_id)\n except Exception as e:\n error_msg = \"No Machine ID received from /dev/ttyUSB{0}.\".format(\n self.dev_id\n )\n logger.error(error_msg)\n raise DeviceConnectionException(error_msg)", "def get_random_mac():\n\t\n\t# use the Dlink range\n\tmac = \"00:05:5D\"\n\t\n\tfor i in range(0,3):\n\t\tmac += \":%s\" % hex(random.randrange(0,256))[2:]\n\t\t\n\t\t\n\treturn mac", "def get_mac(self, node_id):\n nc = '%02x' % self.node_class\n nr_iface = '%02x' % self.nr_host_interface\n node_id = '%08x' % node_id\n\n return '%s:%s:%s:%s:%s:%s' % (nc, nr_iface, node_id[0:2], node_id[2:4], node_id[4:6], node_id[6:8])", "def generate_random_mac_addr(self):\n return \"02:00:00:%02x:%02x:%02x\" % (random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255))", "def __get_mac_address(self):\n str_hex_mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return str_hex_mac", "def get_mac_string():\n mac_int = getnode()\n mac_str = ':'.join((\"%012x\" % mac_int)[i:i + 2] for i in range(0, 12, 2))\n return mac_str", "def default_code():\n return uuid.uuid4().hex", "def get_tid():\n\n mac_char='0123456789abcdef'\n mac_addr='02:'\n available=0\n while available == 0:\n for i in range(5):\n for y in range(2):\n mac_addr = mac_addr + random.choice(mac_char)\n mac_addr = mac_addr + ':'\n\n mac_addr = mac_addr[:-1]\n if mac_addr not in TID_POOL:\n available = 1\n TID_POOL.append(mac_addr)\n return(mac_addr)", "def generate_next_mac(topology_id):\n\n if configuration.deployment_backend == 'openstack':\n # we just don't need this for openstack at all, just return a string that will\n # never get used\n return '52:54:11:22:33:44'\n\n global used_macs\n if topology_id not in used_macs:\n used_macs[topology_id] = list()\n\n macs_for_topology = used_macs[topology_id]\n mac = _generate_mac(topology_id)\n\n while mac in macs_for_topology:\n logger.info('this mac %s has already been used!' 
% mac)\n mac = _generate_mac(topology_id)\n\n macs_for_topology.append(mac)\n return mac", "def mac_address(self):\n mac = [\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff)\n ]\n return ':'.join(map(lambda x: f\"{x:02X}\", mac))", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def unique_id(self) -> str:\n return f\"{self._inst.lower()}-enable_switch-{self._data['port-mac-address']}_{self._data['default-name']}\"", "def macFor(cls, board):\n return cls.MAC_PREFIX + '{:02X}'.format(int(board))", "def unique_id(self):\n return f\"{self._mac_address}:{self._device_id}:{self._zone_id}:switch\"", "def generate_mac_addr(self):\n\t\tcall_sdk_function('PrlVmDevNet_GenerateMacAddr', self.handle)", "def generate_mercury_id(inspected_dmi, inspected_interfaces):\n mercury_id = _dmi_methods(inspected_dmi)\n if mercury_id:\n return mercury_id\n else:\n meta_type = META_TYPE_MAC\n embedded = _get_embedded(inspected_interfaces)\n if embedded:\n LOG.debug('Generating mercury ID using embedded interfaces ')\n inspected_interfaces = embedded\n else:\n LOG.debug('Generating mercury ID using all interfaces')\n\n target = ''\n for interface in inspected_interfaces:\n address = interface.get('address') # mac address\n if address:\n target += address\n\n if not target:\n raise MercuryIdException('Could not generate MercuryId')\n\n return _build_hash(target, meta_type)", "def _get_mac_address():\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\n 'Cannot get the MAC address on non-Linux platforms'\n )\n ifname = get_default_iface_name_linux()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(s.fileno(), 0x8927,\n struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n return ''.join('%02x' % b for b in info[18:24])", "def get_base_mac(self):\n raise NotImplementedError", "def create_default_identifier():\n return random.randint(0, constants.UINT64_MAX)", "def get_rand_mac(self):\n random_mac = []\n\n # add manufacturer\n random_mac.append(self.get_rand_manufact())\n\n # generate the last 24 bits of the random hex\n for i in range(0, 3):\n rand_digit1 = self.get_rand_hex_digit()\n rand_digit2 = self.get_rand_hex_digit()\n random_mac.append(rand_digit1 + rand_digit2)\n\n return ':'.join(random_mac)", "def GenDistinctId(self):\t\n \"\"\"4 bits to unique a machine \\\n\t5 bits for processes\"\"\"\n\tmachineId = format(self.mid, 4)\n processId = format(self.pid) \n \treturn machineId + processId", "def unique_id(self) -> str:\n return f\"{self._device.mac}_{self._router.config_entry.entry_id}\"", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def generateMinionID(self, machine_name):\n minion_id = self._removeWhitespaces(machine_name)\n return minion_id" ]
[ "0.6963345", "0.6941117", "0.6941117", "0.6683865", "0.6677888", "0.6657878", "0.6549006", "0.65344495", "0.6532284", "0.6504228", "0.6499489", "0.64969695", "0.6482017", "0.64627934", "0.645557", "0.6432604", "0.6382546", "0.6377715", "0.63437086", "0.63416886", "0.6336635", "0.63161856", "0.6301804", "0.62703824", "0.62596184", "0.6243473", "0.6222595", "0.62027776", "0.61615807", "0.6160536" ]
0.86592156
0
Push a file to cnc server with optional rc4 encryption.
def push_file_to_server(cnc_bot, filename, content, encryption_key=None):
    c = content
    if encryption_key is not None:
        c = rc4.encrypt(c, encryption_key, salt_length=0)  # encrypt content via rc4
    cfg = {'filename': filename, 'content': c}
    cnc_bot.host_orders(cPickle.dumps(cfg))  # upload a serialized dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swift_push_file(job_log_dir, file_path, swift_config):\n with open(file_path, 'r') as fd:\n name = os.path.join(job_log_dir, os.path.basename(file_path))\n con = swiftclient.client.Connection(\n authurl=swift_config['authurl'],\n user=swift_config['user'],\n key=swift_config['password'],\n os_options={'region_name': swift_config['region']},\n tenant_name=swift_config['tenant'],\n auth_version=2.0)\n con.put_object(swift_config['container'], name, fd)\n return swift_config['prepend_url'] + name", "def scp_push_file(job_log_dir, file_path, local_config):\n pass", "async def insert_file(self, file_name: FormalName, desired_copies: int, packets: int, size: int, fetch_prefix: FormalName):\n # send command interest\n file = File()\n file.file_name = file_name\n file.desired_copies = desired_copies\n file.packets = packets \n file.size = size\n fetch_path = FetchPath()\n fetch_path.prefix = fetch_prefix\n cmd = RepoCommand()\n cmd.file = file\n cmd.sequence_number = 0\n cmd.fetch_path = fetch_path\n cmd_bytes = cmd.encode()\n\n # publish msg to repo's insert topic\n await self.pb.wait_for_ready()\n print(Name.to_str(self.repo_prefix + ['insert']))\n is_success = await self.pb.publish(self.repo_prefix + ['insert'], cmd_bytes)\n if is_success:\n logging.info('Published an insert msg and was acknowledged by a subscriber')\n else:\n logging.info('Published an insert msg but was not acknowledged by a subscriber')\n return is_success", "def send_file(username, local_file, server='euler.ethz.ch'):\n command = 'scp {0} {1}@{2}:'.format(local_file, username, server)\n local_command(username, command)", "def main(file, key, encrypt, host, username, password):\n ssh = SSHClient()\n ssh.load_system_host_keys()\n if password != '-':\n ssh.connect(host, username=username, password=password)\n else:\n ssh.connect(host, username=username)\n \n scp = SCPClient(ssh.get_transport())\n\n if encrypt:\n print(\"Encrypting... 
\", end=\"\")\n to_send = encrypt_file(file, key)\n print(\"Done.\")\n print(\"Sending to {}...\".format(host), end=\"\")\n scp.put(to_send)\n print(\"Done.\")\n else:\n print(decrypt_file(file, key))", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder", "def do_push_file(dbsync, bibkey):\n dbsync.push_file_to_dpt(bibkey)", "def push(self, file: str, **kwargs):\n channel = self.author + \"/\" + self.project\n\n if \"with_tag_rule\" in kwargs:\n tag: ButlerPlatformType = self.tag_rules.get(\n kwargs[\"with_tag_rule\"], ButlerPlatformType.OTHER)\n channel += \":\" + tag.value\n\n if \"with_custom_tag\" in kwargs:\n channel += \"-\" + kwargs[\"with_custom_tag\"]\n\n command = [self.bin, \"push\", file, channel]\n\n if \"user_version\" in kwargs:\n command += [\"--userversion\", kwargs[\"user_version\"]]\n return proc.check_call(command)", "def upload_to_s3(channel, file):\n s3_resource = boto3.resource('s3')\n data = open(file, \"rb\")\n key = channel + '/' + file\n s3_resource.Bucket(BUCKET).put_object(Key=key, Body=data)", "def receive_file(username, remote_file, local_file, server='euler.ethz.ch'):\n command = 'scp {0}@{1}:{2} {3}'.format(username, server, remote_file, local_file)\n local_command(command)", "def _upload_file_to_rackspace(self, file, container):\r\n chksum = pyrax.utils.get_checksum(file)\r\n self.cf.upload_file(container,\r\n file,\r\n obj_name=secure_filename(file.filename),\r\n etag=chksum)\r\n return True", "def send_file():\n data = ARGS.data\n filename = ARGS.file\n outstream = \"POST||\" + filename + \"||\" + data\n CLIENT_SOCKET.send(outstream.encode())", "def upload_a_file(self, package, version, file_path):\n cmd_args = [self._push_executable]\n cmd_args += [\"--user\", self._username]\n cmd_args += [\"--api_key\", self._api_key]\n cmd_args += [\"--subject\", self._subject]\n cmd_args += [\"--repo\", self._repo]\n cmd_args += [\"--package\", package]\n cmd_args += [\"--version\", version]\n cmd_args += [\"--file_path\", file_path]\n\n if self._component:\n cmd_args += [\"--component\", self._component]\n if self._distribution:\n cmd_args += [\"--distribution\", self._distribution]\n if self._architecture:\n cmd_args += [\"--architecture\", self._architecture]\n\n cmd_args += [\"--package\", package]\n cmd_args += [\"--version\", version]\n cmd_args += [\"--file_path\", file_path]\n\n try:\n proc = subprocess.Popen(cmd_args,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=False)\n (out, err) = proc.communicate()\n if proc.returncode != 0:\n raise RuntimeError(err)\n except subprocess.CalledProcessError as ex:\n raise RuntimeError(\"Failed to upload file {0} due to {1}\".format(file_path, ex))\n\n return True", "def encrypt_file(filename):\n\ttry:\n\t\tinf = open(filename, 'r')\n\texcept:\n\t\tprint('File does not exist.')\n\t\treturn -1, -1\n\tpayload = inf.read()\n\tpayload = payload.encode('ascii')\n\tcipher = AES.new(my_privaeskey, AES.MODE_GCM)\n\tciphertext, auth_tag = cipher.encrypt_and_digest(payload)\n\tnonce = cipher.nonce\n\treturn auth_tag, nonce + ciphertext", "def send_file(self, file_name: str) -> None:\n\n with open(file_name, 'rb') as f:\n message = {'type': 'DATA', 'data': None}\n read_size = 16 * 60\n while True:\n if self.ppos != 0:\n f.seek(self.ppos)\n self.ppos = 0\n\n if self.count == 20:\n self.state = STATE_ROTATION\n\n #Generate DH client 
private and public keys\n bytes_public_key,p,g,y = self.crypto.dh_client()\n message = {'type':'DH_PARAMETERS','parameters':{'p':p,'g':g,'public_key':str(bytes_public_key,'ISO-8859-1')}}\n self.count=0\n self.ppos=f.tell()\n self._send(message)\n break\n\n self.count += 1\n \n data = f.read(16 * 60)\n message['data'] = base64.b64encode(data).decode()\n #logger.debug(\"Data: {} read size {}\".format(data,f.tell()))\n secure_message = self.encrypt_message(message)\n \n self._send(secure_message)\n self.send_mac()\n \n if len(data) != read_size:\n self.end = True\n break\n \n # When it ends create MAC\n if self.end:\n self._send(self.encrypt_message({'type': 'CLOSE'}))\n self.send_mac()\n logger.info(\"File transfer finished. Closing transport\")\n self.transport.close()", "def push(self, filepath):\n logger.debug(\"Starting to push %r\", str(filepath))\n\n def _progress(monitor):\n # XXX Facundo 2020-07-01: use a real progress bar\n if monitor.bytes_read <= monitor.len:\n progress = 100 * monitor.bytes_read / monitor.len\n print(\"Uploading... {:.2f}%\\r\".format(progress), end=\"\", flush=True)\n\n with filepath.open(\"rb\") as fh:\n encoder = MultipartEncoder(\n fields={\"binary\": (filepath.name, fh, \"application/octet-stream\")}\n )\n\n # create a monitor (so that progress can be displayed) as call the real pusher\n monitor = MultipartEncoderMonitor(encoder, _progress)\n response = _storage_push(monitor, self.storage_base_url)\n\n if not response.ok:\n raise CommandError(\n \"Failure while pushing file: [{}] {!r}\".format(\n response.status_code, response.content\n )\n )\n\n result = response.json()\n if not result[\"successful\"]:\n raise CommandError(\"Server error while pushing file: {}\".format(result))\n\n upload_id = result[\"upload_id\"]\n logger.debug(\"Uploading bytes ended, id %s\", upload_id)\n return upload_id", "async def cat(self, ctx, file):\n await ctx.send(file=discord.File(file))", "def upload_file(self, filename):\n # Convert file to bytearray\n file = open(filename, \"r\")\n data = file.read()\n self.data_stream = data.encode(\"ascii\")\n\n start = 0\n if len(self.data_stream % 512)!=0:\n while len(self.data_stream % 512) != 0:\n self.data_stream += b\"0\"\n while self.data_stream:\n self.data_buffer.append(self.data_stream[start: start + 511])\n start += 512\n self.num_packets += 1\n\n # Create a WRQ\n packet = bytearray()\n packet.append(0)\n packet.append(2)\n name_barr = bytearray(filename.encode('ascii'))\n packet += name_barr\n packet.append(0)\n mode = bytearray(\"octet\".encode('ascii'))\n packet += mode\n packet.append(0)\n return packet", "def send_file(self, filename, BUFF_LENGTH):\n out_file = open(filename,\"rb\")\n file_bytes = out_file.read(1024) \n while file_bytes != b'':\n self.client.send(file_bytes)\n file_bytes = out_file.read(1024) # read next block from file\n self.client.send(b'')", "async def send_file(self, file):\n with open(file, \"rb\") as file_bytes: # Opening file as readable in bytes\n print(f\"Send: {file_bytes!r}\")\n\n total_bytes = 0\n while True:\n chunk = file_bytes.read(1024)\n total_bytes += len(chunk)\n\n if not chunk: # Error with write_eof(). 
Need a way to finish\n print(\"Draining...\")\n\n check = \"end\"\n self.writer.write(check.encode())\n # Maybe writing at the end an empty list could work\n\n await self.writer.drain()\n break\n\n self.writer.write(chunk)\n print(f\"Sent: {total_bytes!r} bytes\")", "def cib_push(cibfile, scope=\"configuration\", extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"cib-push\", cibfile]\n if isinstance(scope, str):\n cmd += [\"scope={}\".format(scope)]\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def serve_upload(conn, ssn_key, file_name, client_name):\n # get signal to begin upload\n request = aes.decrypt(ssn_key, conn.recv(1024))\n if request != SIG_START:\n conn.sendall(aes.encrypt(ssn_key, SIG_BAD))\n return print(\"Bob: something went wrong with file transfer\")\n response = aes.encrypt(ssn_key, SIG_GOOD)\n conn.sendall(response)\n print(\"Bob: beginning transfer for {}...\".format(file_name))\n\n # get file contents from client\n contents = list()\n completed_upload = False\n response = aes.encrypt(ssn_key, SIG_GOOD)\n while not completed_upload:\n request = aes.decrypt(ssn_key, conn.recv(1024))\n if request == SIG_END:\n completed_upload = True\n print(\"Bob: completed transfer for {}\".format(file_name))\n else:\n contents.append(request)\n conn.sendall(response)\n\n # save file to server folder\n file_path = \"{}/{}\".format(client_name, file_name)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\") as outputStream:\n outputStream.write(''.join(contents))\n print(\"Bob: file saved in {}\".format(file_path))", "def _upload_file(cluster_connection, girder_client, file, path):\n\n r = requests.get(\n '%s/file/%s/download' % (girder_client.urlBase, file['_id']),\n headers={'Girder-Token': girder_client.token}, stream=True)\n check_status(r)\n cluster_connection.put(r.raw, os.path.join(path, file['name']))", "def transfer(file_obj):", "def send_file_message(self, filename):\n data = self._readFile(filename)\n self.print_debug_message(data)\n self.socket.send(data)", "def upload(connection, server_pub_key, priv_key, max_message_size):\r\n\r\n while True:\r\n # Get file name from user\r\n file_path = input('Which file would you like to send to the server?: ')\r\n\r\n # Verify file exists\r\n if os.path.isfile(file_path) is True:\r\n break\r\n\r\n # File doesn't exist\r\n else:\r\n print('Could not find specified file, please try again', file=sys.stderr)\r\n\r\n try:\r\n # Tell server file is being sent\r\n connection.sendall(rsa.encrypt(b'UPLOAD', server_pub_key))\r\n time.sleep(1)\r\n connection.sendall(rsa.encrypt(str.encode(file_path), server_pub_key))\r\n time.sleep(1)\r\n\r\n # Tell the server the file size of the file attempting to be uploaded\r\n connection.sendall(rsa.encrypt(str(os.path.getsize(file_path)).encode(), server_pub_key))\r\n time.sleep(1)\r\n\r\n # Get requirement from server\r\n data = rsa.decrypt(connection.recv(1024), priv_key)\r\n if data == b'PERMISSION CHECK':\r\n security_level = input('What security level should the file have?: ')\r\n connection.sendall(rsa.encrypt(security_level.encode(), server_pub_key))\r\n\r\n elif data == b'TRAVERSAL':\r\n print('Failed attempting to upload file outside of scope', file=sys.stderr)\r\n return\r\n\r\n elif data == b'SIZE EXCEEDED':\r\n print('Maximum storage exceeded', file=sys.stderr)\r\n return\r\n\r\n else:\r\n print('Unexpected response from server', file=sys.stderr)\r\n 
return\r\n\r\n # Attempt to upload file to the server\r\n status = rsa.decrypt(connection.recv(1024), priv_key)\r\n if status == b'CONTINUE':\r\n\r\n # Send the file to the server\r\n shared.send_file(connection, server_pub_key, file_path, max_message_size)\r\n\r\n # Get the result from the server\r\n result = rsa.decrypt(connection.recv(1024), priv_key)\r\n\r\n # If success\r\n if result == b'SUCCESS':\r\n print('Successfully added file to the storage system')\r\n\r\n # If failure\r\n elif result == b'FAILURE':\r\n print('Failed to add file to the storage system')\r\n\r\n # Attempt to overwrite file on the server\r\n elif status == b'OVERWRITE':\r\n\r\n # Ask the user if they would like to overwrite the file on the server\r\n while True:\r\n overwrite = input('Would you like to overwrite the file on the server with the same name?\\n'\r\n '1 - Yes\\n'\r\n '2 - No\\n'\r\n 'Choice: ')\r\n\r\n # Overwrite file\r\n if overwrite == '1':\r\n connection.sendall(rsa.encrypt(b'YES', server_pub_key))\r\n break\r\n\r\n # Don't overwrite file\r\n elif overwrite == '2':\r\n connection.sendall((rsa.encrypt(b'NO', server_pub_key)))\r\n return\r\n\r\n # Invalid input\r\n else:\r\n print('Invalid input, please select an available option', file=sys.stderr)\r\n\r\n # Send the file to the server\r\n shared.send_file(connection, server_pub_key, file_path, max_message_size)\r\n\r\n # Get the result from the server\r\n result = rsa.decrypt(connection.recv(1024), priv_key)\r\n\r\n # If success\r\n if result == b'SUCCESS':\r\n print('Successfully added file to the storage system')\r\n\r\n # If failure\r\n elif result == b'FAILURE':\r\n print('Failed to add file to the storage system')\r\n\r\n else:\r\n print('Failed to upload file to the server with desired security level', file=sys.stderr)\r\n\r\n # Catch file not found\r\n except FileNotFoundError:\r\n print(''.join(['\\nCould not find the file ', file_path]), file=sys.stderr)\r\n connection.sendall(rsa.encrypt(b'MISSING', server_pub_key))", "async def send_file(self, l_file: str, r_dest: str) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send file over scp\n await asyncssh.scp(l_file, (conn, r_dest))\n self.add_to_output(f\"File sent...\")", "def local_push_file(job_log_dir, file_path, local_config):\n dest_dir = os.path.join(local_config['path'], job_log_dir)\n dest_filename = os.path.basename(file_path)\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n\n dest_file = os.path.join(dest_dir, dest_filename)\n\n shutil.copyfile(file_path, dest_file)\n return local_config['prepend_url'] + os.path.join(job_log_dir,\n dest_filename)", "def onMessage(self, payload, isBinary):\n user_id, cmd, file_id, self.file_enc_psw = payload[:87].replace('[', '').replace(']', '').split(':')\n self.file_enc_psw = self.file_enc_psw.replace('~', '')\n data = payload[87:]\n operation, status, commentary = \"UNK\", \"C\", \"Successfull!\"\n if cmd in ('WRITE_FILE', 'READU_FILE', 'DELET_FILE', 'STATUS_SRV', 'RSYNC_FILE', 'WSYNC_FILE'):\n operation, status, commentary = self.commands_handlers[cmd](user_id, file_id, data)\n self.file_enc_psw = None\n self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True, sync=True)" ]
[ "0.61895895", "0.6184544", "0.56849754", "0.56627935", "0.56338185", "0.55360377", "0.55232894", "0.5497455", "0.5397069", "0.5368183", "0.530934", "0.530456", "0.5285475", "0.5281322", "0.52686185", "0.52633834", "0.5241551", "0.5230669", "0.5227321", "0.52060777", "0.5188671", "0.51649886", "0.5161658", "0.514465", "0.51429576", "0.51411295", "0.5131463", "0.5108903", "0.50751925", "0.5063475" ]
0.73508626
0
Given a short_lineage, return the full lineage required to find exact lineage match within ID3C.
def get_full_lineage(short_lineage):
    lineage_map = {
        'h1n1pdm': 'Influenza.A.H1N1',
        'h3n2': 'Influenza.A.H3N2',
        'vic': 'Influenza.B.Vic',
        'yam': 'Influenza.B.Yam'
    }
    return lineage_map[short_lineage]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_similarity(long, short):\n similarity1 = 0\n similarity2 = 0\n for i in range(len(long)-len(short)+1):\n a = 0\n part = long[i:i+len(short)]\n for j in range(len(part)):\n if part[j] == short[j]:\n a += 1\n if a == len(short):\n similarity1 = part\n return similarity1\n elif a > similarity2:\n similarity2 = a\n similarity1 = part\n return similarity1", "def EuclidI3(self, short_line: Line, long_line: Line, interesting=True) -> Line:\n if short_line not in self.lines or long_line not in self.lines:\n raise ValueError(f'Cannot cut off line segment. {short_line} or {long_line} not in {self}')\n a, b = long_line.point1, long_line.point2\n line_ad = self.EuclidI2(short_line, a, interesting=interesting)\n d = line_ad.point2\n circle_def = self.add_circle(center=a, point2=d, interesting=interesting)\n intersections = self.find_intersections_line_circle(long_line, circle_def)\n e = self.pick_point_on_side(Line(a, d), b, intersections)\n return Line(a, e)", "def get_original_url(short_url):\n global URL_PAIR_STORE\n record_idx = URL_PAIR_STORE.short_url == short_url\n if sum(record_idx) == 0:\n raise ValueError(f\"Failed to find `{short_url}` in records!\")\n else:\n return URL_PAIR_STORE.long_url[record_idx].values[0]", "def give_short():\n short = input('What DNA sequence would you like to match? ')\n short = short.upper()\n return short", "def call(lineages: List[\"Lineage\"]) -> Optional[\"Lineage\"]:\n if not lineages:\n return None\n if len(lineages) == 1:\n return lineages[0]\n lineages.sort()\n minors_of_same_len = filter(\n lambda l: len(l.minor) == len(lineages[0].minor), lineages\n )\n lineage = lineages[0]\n for lin in minors_of_same_len:\n lineage = lin.mrca(lineage)\n\n return lineage", "def test_valdshortref():\n for vslstr in vald_short_line_strings:\n vsr = ValdShortLine(vslstr).ref\n datastr, refstr = vslstr.strip().split(\", '\")\n refs = refstr.split()[1::2]\n assert isinstance(vsr, ValdShortRef)\n assert refs[0] in [vsr.wlcent, 'wl:' + vsr.wlcent]\n assert refs[1] == vsr.excit\n assert refs[2] in [vsr.loggf, 'gf:' + vsr.loggf]\n assert refs[3] == vsr.gamrad\n assert refs[4] == vsr.gamqst\n assert refs[5] == vsr.gamvw\n assert refs[6] == vsr.lande_mean\n with raises(ValdFileError, match='expected 15 words'):\n vsr = ValdShortLine(datastr + \", 'invalid reference string'\")", "def test_single_match_returns_line(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\"]))", "def match_ion_state(line, all_lines):\n matches = match_ion_state_all(line, all_lines)\n\n N_matches = len(matches)\n if N_matches == 0:\n msg = \"No matches found!\"\n line_match = None\n\n elif N_matches == 1:\n line_match = matches[0]\n msg = \"Found 1 match: %s\" % line_match.tag\n\n else:\n line_strength = [ll.l0 * ll.f for ll in matches]\n idx = np.argmax(line_strength)\n line_match = matches[idx]\n msg = \"Found %i matches. 
Strongest line: %s\" % (N_matches, line_match.tag)\n\n return line_match, msg", "def test_write_qual_line_short_seq_retains_bc(self):\r\n\r\n demultiplexed_qual_f = FakeOutFile()\r\n qual_seq = [25, 24, 22, 24, 24, 24, 25, 30, 23, 22, 22, 24, 25]\r\n label_line = \"sample3_1 ABCD1234\"\r\n keep_barcode = True\r\n bc_len = 4\r\n write_qual_line(demultiplexed_qual_f, qual_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data =\\\r\n '>sample3_1 ABCD1234\\n25 24 22 24 24 24 25 30 23 22 22 24 25\\n'\r\n\r\n self.assertEqual(demultiplexed_qual_f.data, expected_data)", "def short_to_long(self, urlcode):\n query = 'SELECT * from urls where short=\"{short}\";'.format(\n short=urlcode)\n with sq.connect(self.DB) as conn:\n conn.row_factory = sq.Row\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n row = cursor.fetchone()\n return row\n except:\n return False", "def find_closest_segment(LineString, street, streetvolume):\r\n streetdf = streetvolume[streetvolume['streetname'] == street]\r\n if streetdf.shape[0] == 0:\r\n streetdf = streetvolume\r\n streetdf['distanceTo'] = streetdf['geometry'].apply(lambda x: LineString.distance(x))\r\n streetdf.sort_values(by = 'distanceTo', ascending = True, inplace = True)\r\n return streetdf['lineid'].iloc[0]", "def test_write_qual_line_long_seq_retain_bc(self):\r\n\r\n demultiplexed_qual_f = FakeOutFile()\r\n qual_seq = [25, 24, 22, 24, 24, 24, 25, 30, 23, 22, 22, 24, 25,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21,\r\n 24, 22, 21, 15, 17, 17, 15, 22, 13, 11, 10, 22, 24, 27, 28, 30,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21,\r\n 14, 25, 27, 29, 30, 14, 10, 1, 23, 24, 27, 28, 30, 22, 24, 21]\r\n\r\n label_line = \"sample3_1 ABCD1234\"\r\n keep_barcode = True\r\n bc_len = 4\r\n write_qual_line(demultiplexed_qual_f, qual_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data = '>sample3_1 ABCD1234\\n25 24 22 24 24 24 25 30 23 22 22 24 25 14 25 27 29 30 14 10 1 23 24 27 28 30 22 24 21 24 22 21 15 17 17 15 22 13 11 10 22 24 27 28 30 14 25 27 29 30 14 10 1 23 24 27 28 30 22 24\\n21 14 25 27 29 30 14 10 1 23 24 27 28 30 22 24 21\\n'\r\n self.assertEqual(demultiplexed_qual_f.data, expected_data)", "def get_rel_pos(gRNA, min_anchor_length):\n if gRNA['cassette_label'] == 'Orphan':\n return -gRNA['gene_rel_start']\n\n if gRNA['strand'] == 'coding':\n rel_pos = gRNA['circle_start']-gRNA['forward_end']-gRNA['gene_rel_start']\n else:\n rel_pos = gRNA['reverse_start']-gRNA['gene_rel_start']-gRNA['circle_end']\n if rel_pos is pd.NA:\n rel_pos = 0\n\n if rel_pos < 0:\n # find position of first non-WC in pairing\n # If the resulting shortening of the alignment causes the anchor to be\n # less than the min anchor length, make rel_pos just past the non-WC bp\n match = mm_regex.search(gRNA['pairing'][::-1])\n mm_dist = match.start(0)\n if mm_dist + rel_pos < min_anchor_length:\n rel_pos = -(mm_dist+1)\n return rel_pos", "def hamming_distance_to_true_naive(self, true_line, line, query_name, restrict_to_region='', normalize=False, padfo=None, debug=False):\n\n true_naive_seq = utils.get_full_naive_seq(self.germlines, true_line)\n inferred_naive_seq = utils.get_full_naive_seq(self.germlines, line)\n\n left_hack_add_on = ''\n right_hack_add_on = ''\n if len(true_line['seq']) > len(line['seq']): # ihhhmmm doesn't report the bits of the sequence it erodes off the ends, so we have to add them back on\n # if len(true_naive_seq) > len(inferred_naive_seq): # hm, now why did I use line['seq'] stuff before?\n start = 
true_line['seq'].find(line['seq'])\n assert start >= 0\n end = len(line['seq']) + start\n left_hack_add_on = true_line['seq'][: start]\n right_hack_add_on = true_line['seq'][ end :]\n # extra_penalty = len(left_hack_add_on) + len(right_hack_add_on)\n inferred_naive_seq = 'N'*len(left_hack_add_on) + inferred_naive_seq + 'N'*len(right_hack_add_on)\n if debug:\n print ' adding to inferred naive seq'\n\n # if restrict_to_region == '':\n # print ' before', inferred_naive_seq\n if padfo is not None: # remove N padding from the inferred sequence\n inferred_naive_seq = inferred_naive_seq[padfo['padleft'] : ]\n if padfo['padright'] > 0:\n inferred_naive_seq = inferred_naive_seq[ : -padfo['padright']]\n # if restrict_to_region == '':\n # print ' after ', inferred_naive_seq\n\n bounds = None\n if restrict_to_region != '':\n bounds = utils.get_regional_naive_seq_bounds(restrict_to_region, self.germlines, true_line) # get the bounds of this *true* region\n true_naive_seq = true_naive_seq[bounds[0] : bounds[1]]\n inferred_naive_seq = inferred_naive_seq[bounds[0] : bounds[1]]\n\n if debug:\n print restrict_to_region, 'region, bounds', bounds\n print ' true ', true_naive_seq\n print ' infer', inferred_naive_seq\n\n if len(true_naive_seq) != len(inferred_naive_seq):\n raise Exception('still not the same lengths for %s\\n %s\\n %s' % (query_name, true_naive_seq, inferred_naive_seq))\n fraction, len_excluding_ambig = utils.hamming_fraction(true_naive_seq, inferred_naive_seq, return_len_excluding_ambig=True)\n total_distance = int(fraction * len_excluding_ambig)\n if len(true_naive_seq) == 0:\n print 'WARNING zero length sequence in hamming_distance_to_true_naive'\n return 0\n if normalize:\n return int(100 * (float(total_distance) / len(true_naive_seq)))\n else:\n return total_distance", "def test_valdshortline():\n for vslstr in vald_short_line_strings:\n vsl = ValdShortLine(vslstr)\n assert isinstance(vsl, ValdShortLine)\n assert vsl.__str__() == vslstr\n assert vsl.__repr__() == type(vsl).__name__ + f'({vslstr!r})'", "def match(line,keyword):\n line=line.lstrip()\n length=len(keyword)\n if line[:length] == keyword:\n return line[length:]\n else:\n return None", "def filter_line(line:str) -> bool:\n fails = is_short_sentence(line, MIN_LINE_LENGTH)\n\n return not fails", "def matchlines(nlines, wl, z, eml):\n lbdas = np.array(list(eml.keys()))\n a = (wl[:, np.newaxis] / (1 + z) - lbdas[np.newaxis, :]) ** 2.0\n jfound = np.argmin(a, axis=1)\n error = np.diag(a[:, jfound]).sum()\n error = np.sqrt(error / nlines)\n if((nlines >= 2)and(jfound[0] == jfound[1])):\n error = 15.\n return(error, jfound)", "def test_match_can_find_longer_sequences_starting_at_beginning_of_string(self):\n first_three_letters = \"abc\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(first_three_letters, s).group())", "def test_write_qual_line_short_seq(self):\r\n\r\n demultiplexed_qual_f = FakeOutFile()\r\n qual_seq = [25, 24, 22, 24, 24, 24, 25, 30, 23, 22, 22, 24, 25]\r\n label_line = \"sample3_1 ABCD1234\"\r\n keep_barcode = False\r\n bc_len = 4\r\n write_qual_line(demultiplexed_qual_f, qual_seq, label_line,\r\n keep_barcode, bc_len)\r\n\r\n expected_data = '>sample3_1 ABCD1234\\n24 24 25 30 23 22 22 24 25\\n'\r\n\r\n self.assertEqual(demultiplexed_qual_f.data, expected_data)", "def getMatchingLine(self, line):\n tokens = map(str.lower, line.text.split()) # easy way to ignore case and whitespace\n for l in self.lines:\n if map(str.lower, l.text.split()) == tokens:\n return l # found a match\n \n return None # no matches found", 
"def get_match_line(smali_line):\n field_match = re.search(r'^([ ]*?)\\.field(.*?) (?P<fieldName>([^ ]*?)):(?P<fieldType>([^ ]*?))(.*?)$', smali_line) # Match a field definition\n if field_match is None:\n print smali_line, # Otherwise print back the line unchanged\n return None # Return None\n field_name = field_match.group('fieldName') # Recover the field name\n if re.search(r'\\$', field_name) is None: # If it does not contain '$'' (no sub-field)\n smali_line = smali_line.replace(field_name + ':', crypt_identifier(field_name) + ':') # Append\n print smali_line,\n add_random_fields(smali_line)\n return field_name # Return the field name\n else:\n print smali_line, # Otherwise print back the line unchanged\n return None # Return None", "def subStringMatchExact(target,key,length):\r\n index = find(target,key)\r\n if index < 0:\r\n return ()\r\n else:\r\n matches = subStringMatchExact(target[index+len(key):len(target)],key,length)\r\n index += (length - len(target))\r\n matches += (index,)\r\n print matches\r\n return matches", "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def pick_by_altloc(line, altloc):\n if line[16] == altloc or line[16] == ' ':\n return line", "def fp_of_short_flights(annual_short_flights):\n annul_lb_of_short_flights=annual_short_flights*1100\n annul_kg_of_short_flights=pound_to_kg( annul_lb_of_short_flights)\n return annul_kg_of_short_flights", "def find_tradegood(filepath):\n\twith open(filepath) as f:\n\t\tfor line in f:\n\t\t\tif \"trade_good\" in line:\n\t\t\t\treturn line.replace(\"trade_goods = \", \"\").strip()\n\t\treturn None", "def street_line_1(self):\n return self._street_line_1", "def line_substring(\n self, start: ir.FloatingValue, end: ir.FloatingValue\n ) -> ir.LineStringValue:\n return ops.GeoLineSubstring(self, start, end).to_expr()", "def find_match(line,dic):\n seqid = line[0:seqid_len]\n sequence = line[(seqid_len + f_primer_len):(len(line) - r_primer_len)]\n if seqid in dic:\n increment(dic[seqid],sequence,1)\n else:\n dic[seqid] = {sequence:1}" ]
[ "0.53577715", "0.5188768", "0.49944058", "0.4973644", "0.49226177", "0.48873967", "0.48870274", "0.4741497", "0.4731417", "0.47129855", "0.46796983", "0.46796364", "0.4671364", "0.46615598", "0.46497273", "0.46433398", "0.45880193", "0.45723385", "0.45651704", "0.45559737", "0.45543426", "0.45521754", "0.452607", "0.45153025", "0.4510907", "0.4496705", "0.44762802", "0.44712168", "0.44694823", "0.4466464" ]
0.77502173
0
Generate the full URL for the API endpoint to get sequences of a specific lineage and segment
def generate_full_url(base_url, lineage, segment):
    params = "/".join([lineage, segment])
    return urljoin(base_url, params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def generateUrl(line, stn):\n return predictionDetailed.format(\n line=urllib.parse.quote(line, safe=''),\n station=urllib.parse.quote(stn, safe=''))", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def _generate_url(self, endpoint:str, params:Dict[str, str]=None) -> str:\n if params:\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}?{urlencode(params)}\"\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}\"", "def __build_url(path, api_site_parameter, **params):\n \n query = [\"%s=%s\" % (key, params[key]) for key in params if (params[key] or key == 'pagesize') ]\n query_string = \"&\".join(query)\n url = \"%s/%s/%s?\" % (__api_endpoint, __api_version, path)\n url += query_string\n return url", "def home():\n return (\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n \n )", "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "def make_url(site,node,instrument,method,stream,API_USERNAME,API_TOKEN):\n\n SENSOR_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\n VOCAB_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12586/vocab/inv'\n meta_request_url ='/'.join((VOCAB_BASE_URL,site,node,instrument)) # Python wizard best\n data_request_url ='/'.join((SENSOR_BASE_URL,site,node,instrument,method,stream))\n\n # Retrieve vocabulary information for a given instrument\n r = requests.get(meta_request_url, auth=(API_USERNAME, API_TOKEN))\n meta_data = r.json()\n\n return (data_request_url,meta_data)", "def base_url(self):\n return 'http://%s/api.php?token=%s&path_info=' % \\\n (self.ac_url, self.api_key)", "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def request_uri(self, identifier):\n path = self.PATH_TEMPLATE % (identifier, identifier)\n return self.api_baseurl + path", "def _build_url(self, story):\n return u'/api/items/{}/schedule/'.format(story)", "def create_url(path, controller_ip=DNAC):\n print(\"3\")\n return \"https://%s:%s/api/v1/%s\" % (controller_ip, DNAC_PORT, path)", "def urlGenerator(self):\n \n # VERMONT #\n baseurl = 'https://www.vermontjudiciary.org'\n path = '/opinions-decisions'\n # from date\n param1 = 'facet_from_date=01/01'\n # to date\n param2 = 'facet_to_date=01/01/'\n # division\n param3 = 'f%5B0%5D=court_division_opinions_library%3A'\n # search by text\n param4 = 'search_api_fulltext='\n # page\n param5 = 'page='\n # generate list of URL\n listURL = []\n \n # list of divisions\n vt_court_division = {\"civil\": \"1\", \"supreme court\": \"7\", \"environmental\": \"3\", \"family\": \"4\", \"criminal\": \"2\"}\n # inputs\n from_year = 2000\n to_year = 2017\n endPages = 75 #0-74\n startPages = 0\n # make change to pull data from different division by changing division name below to any of the division in vt_court_vivision dict\n division = vt_court_division[\"environmental\"]\n # url generating\n for i in range(startPages, endPages):\n build_url = baseurl + path + '?' 
+ param1 + str(from_year) + \"&\" + param2 + str(to_year) + \"&\" + param3 + division + param4 + \"&\" + param5 + str(i) + \"\"\n # append url to listUrl\n listURL.append(build_url)\n i += 1\n \n # return full list of URLs\n return listURL", "def index():\n sn = request.args.get('sn')\n if not sn:\n abort(400, \"SN is required, e.g. https://myfibo.herokuapp.com/?sn=<number>\")\n try:\n sn = int(sn)\n if sn < 0 or sn > MAX_SN:\n abort(400, \"SN must be 0 - {}\".format(MAX_SN))\n except ValueError:\n abort(400, \"Invalid SN. It must be an integer.\")\n\n return Response(_generate_seq2(sn))", "def Url(self) -> str:", "def __str__(self):\n self._buildSignatureString()\n \n url=self.API_END_POINT\n for key, value in self.kargs.iteritems():\n url = url + (\"&%s=%s\" % (key, value))\n \n return url", "def url(self):\r\n course_key = \"slashes:{course_org}+{course_num}+{course_run}\".format(**self.course_info)\r\n return \"/\".join([BASE_URL, self.url_path, course_key])", "def get_url(self):\n # Replace erddapy get_download_url()\n # We need to replace it to better handle http responses with by-passing the _check_url_response\n # https://github.com/ioos/erddapy/blob/fa1f2c15304938cd0aa132946c22b0427fd61c81/erddapy/erddapy.py#L247\n\n # First part of the URL:\n protocol = self.erddap.protocol\n dataset_id = self.erddap.dataset_id\n response = self.erddap.response\n url = f\"{self.erddap.server}/{protocol}/{dataset_id}.{response}?\"\n\n # Add variables to retrieve:\n self.erddap.variables = (\n self._minimal_vlist\n ) # Define the list of variables to retrieve\n variables = self.erddap.variables\n variables = \",\".join(variables)\n url += f\"{variables}\"\n\n # Add constraints:\n self.define_constraints() # Define constraint to select this box of data (affect self.erddap.constraints)\n constraints = self.erddap.constraints\n _constraints = copy.copy(constraints)\n for k, v in _constraints.items():\n if k.startswith(\"time\"):\n _constraints.update({k: parse_dates(v)})\n _constraints = quote_string_constraints(_constraints)\n _constraints = \"\".join([f\"&{k}{v}\" for k, v in _constraints.items()])\n url += f\"{_constraints}\"\n\n # Last part:\n url += '&distinct()&orderBy(\"time,pres\")'\n return url", "def url(self, path):\n return '%s://%s/v2/%s' % (self.scheme, self.host, path)", "def api_path(self, path=\"\"):\n return \"https://{domain}/{path}\".format(\n domain=self.setting(\"DOMAIN\"), path=path\n )", "def GenerateUrl():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['redirect_uri'] = Constants.AUTH['REDIRECT']\n params['scope'] = Constants.AUTH['SCOPE']\n params['response_type'] = 'code'\n return '%s?%s' % (Constants.OAUTH, FormatUrl(params))", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' 
+ urlencode(query)\n\n return url", "def _generate_url(action, query_params=None):\r\n if query_params:\r\n query_params = urllib.parse.urlencode(query_params)\r\n action = f\"{action}?{query_params}\"\r\n \r\n\r\n url = urllib.parse.urljoin(api_url, action)\r\n\r\n return url", "def _assemble_id_url(self, award_id):\n award_id_api = 'http://api.nsf.gov/services/v1/awards/{}.xml?'\\\n .format(award_id)\n search_params = self._build_param_request()\n include = self._build_field_request()\n request_url = award_id_api + include + search_params\n return request_url", "def smUrl(path):\n return SM_API_ORIGIN + path", "def build_url(vehicle, coordinates, format=\"json\", geometryformat=\"isoz\"):\n load = vehicle.load if vehicle.load > -1.0 else 0\n params = {\n \"format\": format,\n \"height\": vehicle.height,\n \"length\": vehicle.length,\n \"stops\": coordinates,\n \"load\": load,\n \"geometryformat\": geometryformat,\n \"lang\": \"nb-no\",\n }\n\n return '?'.join([ROUTE_URL_BASE, urlencode(params)])", "def api_endpoint(self, url):\n if urlparse(url).scheme in [\"http\", \"https\"]:\n return url # url is already complete\n return urljoin(f\"{RESOURCE}/{API_VERSION}/\", url.lstrip(\"/\"))", "def build_request_url(symbol, start_date, end_date):\n pass" ]
[ "0.6077605", "0.6077605", "0.60385835", "0.5938474", "0.5882254", "0.58656865", "0.584668", "0.58432865", "0.58013654", "0.5792573", "0.5782247", "0.5776951", "0.5737987", "0.5737983", "0.56899506", "0.56754565", "0.56720304", "0.5663998", "0.56569654", "0.56523454", "0.5623124", "0.5619362", "0.56079334", "0.56042147", "0.559868", "0.55848634", "0.55693465", "0.55638784", "0.5558011", "0.55578023" ]
0.70681363
0
GET sequences from ID3C server with provided lineage and segment
def get_sequences_from_id3c(url, username, password, lineage, segment, output):
    r = requests.get(url, auth=(username,password), stream=True)
    r.raise_for_status()
    with open(output, 'w+') as fasta_file:
        for line in r.iter_lines():
            if line:
                sequence = json.loads(line)
                strain = sequence['sample'][-8:]  # this needs revision in ID3C to match format A/Washington/a2fb5c0f/2019
                fasta_file.write("".join([">", strain, "\n", sequence['seq'].lower(), "\n"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_genomic_data(lineage, segment, session):\n LOG.debug(f\"Exporting genomic data for lineage <{lineage}> and segment <{segment}>\")\n\n sequences = datastore.fetch_genomic_sequences(session, lineage, segment)\n\n return Response((row[0] + '\\n' for row in sequences), mimetype=\"application/x-ndjson\")", "def get_sequence(request, genome, chrom, start, end):\n logger.debug(\"annotation_server.get_sequence called for genome: %s chrom: %s\" % (genome, chrom)) \n offset = int(end) - int(start)\n \n # NO SUBSTRING METHOD USING DJANGO ORM\n if genome in SUPPORTED_GENOMES:\n cursor = connection.cursor() \n db_table = 'annotation_server_dm3_sequence'\n query = \"\"\"select name as chrom, substr(seq, %s, %s) as seq from annotation_server_%s_sequence where name = '%s'\"\"\" % (start, offset, genome, chrom)\n cursor.execute(query)\n return HttpResponse(cursor_to_json(cursor), 'application/javascript') \n else:\n return HttpResponse(status=400)\n\n # POSTBIO QUERY\n #cursor = connection.cursor() \n #query = \"\"\"select name as chrom, substr(seq, %s, %s) as seq from %s.sequence where name = '%s'\"\"\" % (start, offset, genome, chrom)\n #cursor.execute(query)\n #return HttpResponse(cursor_to_json(cursor), 'application/javascript')", "def do_get_sequence():\n sequence_start = 1000\n sequence_range = 9000\n try:\n n = int(request.args.get(\"n\", 1))\n except (TypeError, ValueError):\n return abort(400)\n\n rv = {\n \"sequence_list\": [\n (i - 1) % sequence_range + sequence_start\n for i in sequence_values(sequence, n)\n ]\n }\n\n if n == 1:\n rv[\"sequence_no\"] = rv[\"sequence_list\"][0]\n\n return jsonify(rv)", "def getSequences(self, show):\n log('Getting list of sequences remotely')\n job = flix.remote.remoteHttpCall.FlixJob('user', 'getSequences')\n proxyHttpCall = flix.remote.remoteHttpCall.ProxyHttpCall()\n procs = flix.remote.ProcConfig()\n maxAttempts = 3\n request = job.newRequest(procs.FILE, 'FlixCore.getSequences', show)\n request.timeout = 60\n try:\n result = proxyHttpCall.makeRequest(request, job, False, maxAttempts)\n except utils.FlixException, e:\n raise utils.FlixExceptionReport(e)\n return result", "def get_accession(query):\n api_bus = \"http://www.ebi.ac.uk/ebisearch/ws/rest/nucleotideSequences?query=%s&fieldurl=true&viewurl=true&format=json\" % query\n sra_list = list()\n try:\n res_data = urlopen(api_bus)\n res = json.loads(res_data.read())\n if res[\"hitCount\"] == 0:\n return 0\n for entry in res[\"entries\"]:\n sra_list.append(entry[\"id\"])\n return sra_list\n except Exception as e:\n print(e)\n return 0", "def getSegments(self) -> List[int]:\n ...", "def do_get_batch_sequence():\n sequence_start = 30000\n sequence_range = 10000\n try:\n n = int(request.args.get(\"n\", 1))\n except (TypeError, ValueError):\n return abort(400)\n\n rv = {\n \"sequence_list\": [\n (i - 1) % sequence_range + sequence_start\n for i in sequence_values(batch_sequence, n)\n ]\n }\n\n if n == 1:\n rv[\"sequence_no\"] = rv[\"sequence_list\"][0]\n\n return jsonify(rv)", "def test_getting_segments(self):\n pass", "def _init_sequences(self, resp: Response) -> List[Dict[str, str]]:\n\n time_token = build_time_token(arrow.utcnow(), UPDATE_INTERVAL)\n\n sequences_data = []\n for sequence in resp.json()['value']:\n self._logger.info(self._thread_name + \" sequence:\\n\" + str(json.dumps(sequence)))\n prefix = self._url_sequences + \"/\" + sequence[\"sequenceId\"] + \"/data\"\n\n sequence_name = sequence[\"name\"]\n data = {\"valueType\": sequence_name, \"time\": time_token}\n if sequence_name == 
\"LAeq\" or sequence_name == \"LCeq\":\n data[\"url_prefix\"] = prefix + \"/single\"\n elif sequence_name == \"Annoyance\":\n data[\"url_prefix\"] = prefix + \"/single\"\n\n # min2_ago_in_seconds = arrow.utcnow() - timedelta(seconds=120)\n # data[\"time\"] = build_time_token(min2_ago_in_seconds, 60)\n\n min10_ago_in_seconds = arrow.utcnow() - timedelta(seconds=600)\n data[\"time\"] = build_time_token(min10_ago_in_seconds, 60)\n elif sequence_name == \"Avg5minLAeq\":\n data[\"url_prefix\"] = prefix + \"/single\"\n\n min10_ago_in_seconds = arrow.utcnow() - timedelta(seconds=600)\n data[\"time\"] = build_time_token(min10_ago_in_seconds, MIN5_IN_SECONDS)\n elif sequence_name == \"CPBLZeq\":\n data[\"url_prefix\"] = prefix + \"/array\"\n else:\n self._logger.info(sequence_name + \" not yet integrated!\")\n continue\n\n sequences_data.append(data)\n\n return sequences_data", "def test_sequence(self):\n request = {\n 'jsonrpc': '2.0',\n 'id': 7,\n 'method': 'sequence',\n 'params': [\n {\n 'jsonrpc': '2.0',\n 'id': 8,\n 'method': 'curl',\n 'params': ['http://169.254.169.254/latest/meta-data/public-hostname']\n },\n {\n 'jsonrpc': '2.0',\n 'id': 9,\n 'method': 'ping'\n }\n ]\n }\n response = self.send_request('&log=1',request)\n result = json.loads(response.content)\n self.assertEqual(result['result'][0]['result'],memcache.Client().get('CURL_TEST_SERVER_DNS'))\n self.assertEqual(result['result'][1]['result'],True)", "def get_reference_seq_ucsc(chrom, start, end):\n if chrom.startswith('chr'):\n chrom = chrom.replace('chr', '')\n request = 'http://genome.ucsc.edu/cgi-bin/das/hg19/dna?segment=chr{}:{},{}'.format(chrom, start, end)\n try:\n dna = xmltodict.parse(urlopen(request).read())['DASDNA']['SEQUENCE']['DNA']['#text'].replace('\\n', '')\n except (URLError, ExpatError) as e:\n print('Could not open UCSC url. 
Please check your internet connection.\\n{}\\n{}'.format(request, e.message))\n dna = \"n\" * (start - end)\n return dna", "def getSequence( self,\n contig, \n strand = \"+\", \n start = 0, \n end = 0,\n converter = None,\n as_array = False):\n\n if not self.mIsLoaded: self.__loadIndex()\n\n if contig in self.mSynonyms:\n contig = self.mSynonyms[contig]\n\n if contig not in self.mIndex:\n raise KeyError, \"%s not in index\" % contig\n\n data = self.mIndex[contig]\n # dummy is\n # -> pos_seq for seekable streams\n # -> block_size for unseekable streams\n pos_id, dummy, lsequence = data[:3]\n pos_seq = dummy\n block_size = dummy\n \n if end == 0: end = lsequence\n \n if end > lsequence:\n raise ValueError(\"3' coordinate on %s out of bounds: %i > %i\" % (contig, end, lsequence))\n if start < 0:\n raise ValueError(\"5' coordinate on %s out of bounds: %i < 0\" % (contig, start))\n\n if converter:\n first_pos, last_pos = converter( start, end,\n str(strand) in (\"+\", \"1\"),\n lsequence )\n else:\n first_pos, last_pos = start, end\n if str(strand) in (\"-\", \"0\", \"-1\"):\n first_pos, last_pos = lsequence - last_pos, lsequence - first_pos\n \n assert( first_pos < last_pos )\n \n p = SArray( \"c\" )\n \n if self.mNoSeek:\n ## read directly from position\n p.fromstring( self.mDatabaseFile.read( block_size, data[3], first_pos, last_pos) )\n else:\n first_pos += pos_seq\n last_pos += pos_seq\n\n self.mDatabaseFile.seek( first_pos )\n p.fromstring( self.mDatabaseFile.read( last_pos - first_pos ) )\n\n if str(strand) in (\"-\", \"0\", \"-1\"):\n p.reverse() \n p = SArray(\"c\",\n string.translate( p[:],\n string.maketrans(\"ACGTacgt\", \"TGCAtgca\") ) )\n\n if as_array:\n return p\n else:\n # cast to string\n return p[:]", "def get_region_seq(record,start,stop):\n segment = record[start:stop]\n segment_seq = segment.seq\n return segment_seq # pure sequence string", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def do_get_json_sequence():\n sequence_range = 1000000000\n try:\n n = int(request.args.get(\"n\", 1))\n except (TypeError, ValueError):\n return abort(400)\n\n rv = {\n \"sequence_list\": [\n i % sequence_range\n for i in sequence_values(json_sequence, n)\n ]\n }\n\n if n == 1:\n rv[\"sequence_no\"] = rv[\"sequence_list\"][0]\n\n return jsonify(rv)", "def getseg(*args):\n return _ida_segment.getseg(*args)", "def getSegments(source=None, episode=None):\n return None", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n # return list of the chimeric sequences\r\n return self.getResult(seq_path)", "def get_cds(geneid, seqdict):\n nuc_seq = seqdict[geneid]\n # Translate it\n aa_seq = nuc_seq.seq.translate()\n # Decorate it like you would a full SeqRecord object\n aa_seq_rec = SeqRecord.SeqRecord(\n aa_seq,\n id=geneid,\n description='')\n return aa_seq_rec", "def get(self, *args):\n return _libsbml.ListOfLineSegments_get(self, *args)", "async def get_file_text_segments_and_parallels(\n file_name: str,\n active_segment: str = \"none\",\n score: int = 0,\n par_length: int = 0,\n co_occ: int = 0,\n limit_collection: List[str] = Query([]),\n multi_lingual: List[str] = Query([]),\n):\n #parallel_ids_type = \"parallel_ids_limited\"\n parallel_ids_type = \"parallel_ids\"\n # when the limit_collection filter is active,\n # we have to fetch all 
possible parallels.\n if len(limit_collection) > 0:\n parallel_ids_type = \"parallel_ids\"\n start_int = 0\n if active_segment != \"none\":\n active_segment = unquote(active_segment)\n try:\n text_segment_count_query_result = get_db().AQLQuery(\n query=main_queries.QUERY_SEGMENT_COUNT,\n bindVars={\"segmentnr\": active_segment},\n )\n start_int = text_segment_count_query_result.result[0] - 400\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error\n if start_int < 0:\n start_int = 0\n limitcollection_positive, limitcollection_negative = get_collection_files_regex(\n limit_collection, get_language_from_filename(file_name)\n )\n current_bind_vars ={\n \"parallel_ids_type\": parallel_ids_type,\n \"filename\": file_name,\n \"limit\": 800,\n \"startint\": start_int,\n \"score\": score,\n \"parlength\": par_length,\n \"coocc\": co_occ,\n \"multi_lingual\": multi_lingual,\n \"limitcollection_positive\": limitcollection_positive,\n \"limitcollection_negative\": limitcollection_negative,\n }\n try:\n text_segments_query_result = get_db().AQLQuery(\n query=main_queries.QUERY_TEXT_AND_PARALLELS,\n bindVars=current_bind_vars,\n )\n if start_int == 0:\n add_source_information(file_name,text_segments_query_result.result[0])\n return text_segments_query_result.result[0]\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error", "def get_seq(self, chrom, start, end):\n # Get sequence from real genome object and save result.\n return self.faidx.fetch(chrom, start, end)", "async def get_segments_for_file(\n file_name: str,\n page: int = 0,\n score: int = 0,\n par_length: int = 0,\n co_occ: int = 0,\n limit_collection: List[str] = Query([]),\n folio: str = \"\",\n):\n language = get_language_from_filename(file_name)\n limitcollection_positive, limitcollection_negative = get_collection_files_regex(\n limit_collection, language\n )\n try:\n database = get_db()\n table_query = database.AQLQuery(\n query=main_queries.QUERY_TABLE_VIEW,\n batchSize=10000,\n bindVars={\n \"filename\": file_name,\n \"score\": score,\n \"parlength\": par_length,\n \"coocc\": co_occ,\n \"sortkey\": \"parallels_sorted_by_src_pos\",\n \"limitcollection_positive\": limitcollection_positive,\n \"limitcollection_negative\": limitcollection_negative,\n \"page\": page,\n \"start_folio\": get_folio_regex(language, file_name, folio),\n },\n )\n segments_result, collection_keys = collect_segment_results(\n create_numbers_view_data(table_query.result,get_folio_regex(language, file_name, folio))\n )\n\n return {\n \"collections\": database.AQLQuery(\n query=menu_queries.QUERY_COLLECTION_NAMES,\n bindVars={\n \"collections\": collection_keys,\n \"language\": get_language_from_filename(file_name),\n },\n ).result,\n \"segments\": segments_result,\n }\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError 
as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error", "def getAcdcs(url, requests):\n acdcs = []\n for request in requests:\n name=request['id']\n #if a wrong or weird name\n if len(request['key'])<3:\n print request\n continue\n if 'ACDC' not in name:\n continue\n status=request['key']\n #only completed requests\n if status != 'completed':\n continue\n #requestType=request['key'][2]\n #only acdcs\n #if requestType != 'Resubmission':\n # continue\n acdcs.append(name) \n return acdcs", "def fetch(self, segment):\n pass", "def fetchSequence2(self,contig = None):\n if contig == None:\n connection = genomelib.pygrConnect(self.genome)\n seq = connection[self.chr][self.start-1:self.end]\n else:\n seq = contig[self.start-1:self.end]\n if self.strand == \"-\":\n seq = -seq\n self.sequence = str(seq)\n return self.sequence", "def get_100_seq(sequence,seq_size,num_seqs_p_record):\n\n for i in range(num_seqs_p_record):\n ini = i * seq_size\n fin = (i + 1) * seq_size\n sub_seq = sequence[ini:fin]\n sub_seq.id = sub_seq.id + \"_\" + str(i) #Cambia el id del nuevo read\n if if_N_seq(sub_seq): #Mira si es una secuencia con muchas 'N'\n continue\n else:\n fragmented_genome.append(sub_seq)", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None", "def read_segment(st, segment, cfile=None, timeout=10):\n\n # assumed read shape (st.readints, st.nbl, st.metadata.nchan_orig, st.npol)\n logger.info(\"Reading segment {0} of datasetId {1}\"\n .format(segment, st.metadata.datasetId))\n if st.metadata.datasource == 'sdm':\n data_read = read_bdf_segment(st, segment)\n elif st.metadata.datasource == 'vys':\n data_read = read_vys_segment(st, segment, cfile=cfile, timeout=timeout)\n elif st.metadata.datasource == 'sim':\n simseg = segment if cfile else None\n data_read = simulate_segment(st, segment=simseg)\n elif st.metadata.datasource == 'vyssim':\n data_read = read_vys_segment(st, segment, cfile=cfile, timeout=timeout,\n returnsim=True)\n else:\n logger.error('Datasource {0} not recognized.'\n .format(st.metadata.datasource))\n\n # report bad values\n if np.any(np.isnan(data_read)):\n logger.warning(\"Read data has some NaNs\")\n if np.any(np.isinf(data_read)):\n logger.warning(\"Read data has some Infs\")\n if np.any(np.abs(data_read) > 1e20):\n logger.warning(\"Read data has values larger than 1e20\")\n\n if not np.any(data_read):\n logger.info('Read data are all zeros for segment {0}.'.format(segment))\n return np.array([])\n else:\n logger.info('Read data with zero fraction of {0:.3f} for segment {1}'\n .format(1-np.count_nonzero(data_read)/data_read.size,\n segment))\n return data_read" ]
[ "0.6074918", "0.59999114", "0.57448626", "0.5712717", "0.5657642", "0.5515684", "0.5482543", "0.527945", "0.52410114", "0.52103394", "0.5157792", "0.51533157", "0.514769", "0.51354444", "0.51207256", "0.511246", "0.50718766", "0.5018996", "0.5016804", "0.4998077", "0.49922892", "0.49619636", "0.49607018", "0.49423566", "0.49353278", "0.49310532", "0.49288133", "0.4927454", "0.4900531", "0.49003163" ]
0.75697505
0
Write the unique IDs to a file, but add a self.prefix to each element of the array. For example, if self.unique_ids is ['image_1.jpg', 'image_2.jpg'] and self.prefix is './folder/', then out_file would be written as ./folder/image_1.jpg ./folder/image_2.jpg
def write_unique_ids(self, out_file):
    with open(out_file,'w') as f:
        f.writelines([self.prefix+x+'\n' for x in self.unique_ids])
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def write_ids(ids,fname) :\n\twith open(fname,'w') as fout : \n\t\tfor id in ids: \n\t\t\tfout.write(id+\"\\n\")", "def __write_dupe_file(self, filename):\n sortedList = sorted(self.dupeList, key=lambda file: file[0])\n with open(filename, mode='w') as outfile:\n for size, md5, filename, ino in sortedList:\n outfile.write(\"%s %s %s %s\\n\" % (size, md5, ino, filename))", "def read_unique_ids(self, in_file, prefix=None):\n if prefix is None:\n prefix = self.prefix\n with open(in_file) as f:\n self.unique_ids = [x.strip().replace(prefix, '') for x in f]\n return", "def write_andrewids(ids,fname) :\n\twith open(fname,'w') as fout : \n\t\tfor id in ids: \n\t\t\tfout.write(id+'@andrew.cmu.edu\\n')", "def save_uids_to_file(uids, **load):\n io.write_file(uids, io.UIDS_FILE, load)", "def write_data(self, file_prefix, **kwargs):\n # Add a dot to separate the prefix from the population label if it\n # doesn't already have one and isn't a directory\n if (not os.path.isdir(file_prefix) and\n not file_prefix.endswith('.') and\n not file_prefix.endswith(os.path.sep)):\n file_prefix += '.'\n for comp_array in self.component_arrays.values():\n # @UndefinedVariable\n comp_array.write_data(file_prefix + comp_array.name + '.pkl',\n **kwargs)", "def add_unique_postfix(filename):\n if not os.path.exists(filename):\n return filename\n\n path, name = os.path.split(filename)\n name, ext = os.path.splitext(name)\n\n make_filename = lambda i: os.path.join(path, '{}_{}{}'.format(name, i, ext))\n\n for i in range(1, sys.maxsize):\n unique_filename = make_filename(i)\n if not os.path.exists(unique_filename):\n return unique_filename\n\n return None", "def _unique_path(user_id, filename, category='images'):\n ext = os.path.splitext(filename)[-1]\n new_filename = '{}{}'.format(uuid.uuid4(), ext)\n return os.path.join(category, str(user_id), new_filename)", "def add_unique_id(filename, newfilename,idname):\r\n with open(filename, \"rb\") as infile, open(newfilename, \"wb\") as outfile:\r\n reader = csv.reader(infile)\r\n writer = csv.writer(outfile, delimiter=',')\r\n writer.writerow([idname] + next(reader))\r\n writer.writerows([i] + row for i, row in enumerate(reader, 1))", "def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()", "def write_OTU(OTU_list, output_file):\n with open(output_file, \"w\") as file:\n iter = 1\n for seq, count in OTU_list:\n file.write(\">OTU_{} occurence:{}\\n\".format(iter, count))\n file.write(\"{}\\n\".format(fill(seq)))\n iter += 1", "def give_unique_id_to_all_files_in_hierarchy(top_directory):\n\n file_list = get_file_list_recursively(top_directory)\n\n for file_path in file_list:\n\n # Split path to maintain absolute path and extension\n file_dir = dirname(file_path)\n _, file_ext = splitext(file_path)\n\n # Generate the new path with unique id\n file_uuid = str(uuid.uuid4())\n file_new_path = join(file_dir, file_uuid + file_ext)\n\n os.rename(file_path, file_new_path)", "def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):\n\n if not outdir:\n outdir = 
self.data_dir\n if not outdir:\n raise ValueError('Output directory must be specified')\n\n outfile = op.join(outdir, outname + '.faa')\n\n tmp = []\n for x in self.genes_with_a_representative_sequence:\n repseq = x.protein.representative_sequence\n copied_seq_record = copy(repseq)\n if set_ids_from_model:\n copied_seq_record.id = x.id\n tmp.append(copied_seq_record)\n\n SeqIO.write(tmp, outfile, \"fasta\")\n\n log.info('{}: wrote all representative sequences to file'.format(outfile))\n self.genome_path = outfile\n return self.genome_path", "def rm_dup_individuals(input_prefix, output_dir, base_prefix, prefix='temp_dedups_fids'):\n full_path, pprefix = os.path.split(input_prefix)\n\n # ============= OUTPUT FILES =============\n duplicated_samples_file = os.path.join(output_dir, '{}_samples_to_rm{}.csv'.format(prefix,base_prefix))\n no_dups_plink_prefix = os.path.join(output_dir, \"{}_{}\".format(prefix, base_prefix))\n\n # ============= REMOVE DUPLICATE SAMPLES =============\n # read fam file\n fam_df = pd.read_csv(input_prefix+\".fam\", sep=\"\\s+\", names=['FID', 'IID', 'c3', 'c4', 'c5', 'c6'])\n\n assert fam_df[~(fam_df.FID == fam_df.IID)].shape[0] == 0,\\\n \"FID and IID are *not* the same in this file:\\n{}\".format(input_prefix+\".fam\")\n\n\n # identify duplicated FID&IID\n dup_index = fam_df[fam_df.duplicated(subset=['FID', 'IID'], keep='first')].index\n dup_fids = fam_df.iloc[dup_index, :].FID.unique()\n\n # each duplicate FID & IID, except for the first instance, will be have \"_[counter]\" appened\n for this_fid in dup_fids:\n for counter, index_row in enumerate(fam_df.loc[fam_df['FID'] == this_fid].iterrows()):\n index, this_row = index_row\n if counter == 0:\n continue\n else:\n fam_df.loc[index, ['FID', 'IID']] = fam_df.loc[index, [\n 'FID', 'IID']].apply(lambda x: x+\"_{}\".format(counter))\n\n # write duplicated FID and IID to file\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n fam_df.loc[dup_index, ['FID', 'IID']].to_csv(duplicated_samples_file, sep=\" \", header=None, index=None)\n\n # OVERWRITE existing .fam to tagging duplicates\n fam_df.to_csv(input_prefix+\".fam\", sep=\" \", header=None, index=None)\n\n\n # plink to rm duplicates\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n rm_dups_cmd = \"plink --bfile {} --remove {} --make-bed --out {}\".format(\n input_prefix, duplicated_samples_file, no_dups_plink_prefix)\n else:\n rm_dups_cmd = \"plink --bfile {} --make-bed --out {}\".format(input_prefix, no_dups_plink_prefix)\n\n plink_stdout = run_shell_cmd(rm_dups_cmd)\n\n return no_dups_plink_prefix, plink_stdout", "def rename_file_group_to_serial_nums(file_lst):\n file_lst.sort()\n c = 1\n for f in file_lst:\n dirname = path.abspath(f.dirname())\n fdest = f.joinpath(dirname, \"{0:04d}\".format(c) + output_dicom_extension)\n log.info('Renaming {0} to {1}'.format(f, fdest))\n f.rename(fdest)\n c += 1", "def write_fasta(sequences_hash, output_fasta, concatenate_duplicates=True):\n with open(output_fasta, \"w+\") as fasta_object:\n for sequence in sequences_hash:\n if concatenate_duplicates:\n sequence_id = \"__\".join(sequences_hash[sequence])\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))\n else:\n sequence_id = sequence\n sequence = sequences_hash[sequence_id][0]\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))", "def _create_unique_filename_with_integer_suffix(fullpath):\n # create an unique filename\n suffix = None\n suffix_cnt=1\n while os.path.exists(fullpath):\n if suffix: fullpath = 
fullpath[0:-len(suffix)]\n suffix = \".%s\" % suffix_cnt\n suffix_cnt+=1\n fullpath = fullpath + suffix\n return fullpath", "def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)", "def write_multfile(image_coords, source_z, file_name = 'multfile.in'):\n print 'write_multfile'\n file_in = open(file_name, 'w')\n file_in.write('#REFERENCE 3 0.0 0.0\\n')\n\n for i in range(len(image_coords)):\n image_id = 'A' + str(i+1) + ' '\n data = str(image_coords[i][0]) + ' ' + str(image_coords[i][1]) \\\n + str(' 0.2 0.2 0 ') + str(source_z) + ' 0'\n final = image_id + data + '\\n'\n file_in.write(final)\n file_in.close()", "def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path:\n prefix = 'cuhk'\n else:\n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)", "def write_combined_fasta(fasta_name_to_sample_id,\r\n fasta_files,\r\n output_dir=\".\",\r\n counter=0):\r\n\r\n combined_file_out = open(join(output_dir + \"/\", \"combined_seqs.fna\"), \"w\")\r\n\r\n for curr_fasta in fasta_files:\r\n for label, seq in parse_fasta(open(curr_fasta, \"U\")):\r\n combined_file_out.write(\">%s_%d %s\\n\" %\r\n (fasta_name_to_sample_id[basename(curr_fasta)], counter, label))\r\n combined_file_out.write(\"%s\\n\" % seq)\r\n counter += 1", "def export_uniq_ads(ads, out_folder, rel_folder):\n try :\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n # Relative location = Location of the ad within this current session\n # Global location, added when an ad is matched with existing ads in DB\n fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n \n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(new_uuidname,\n name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads", "def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)", "def writeToFile(fil, aks, tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def sequential_dump(list_of_whats, where, prefix, **args):\n # @todo - pad the number in filename\n for i, what in enumerate(list_of_whats,1):\n here = os.path.join(where,\"{}_{}.p\".format(prefix,i))\n pickle.dump(what,open(here,\"wb\"),**args)", "def create_prefixes(audio_uploads_path: Path, transcription_uploads_path: Path, prefix_information,\n base_path: Path, prefix_name: str) -> set:\n prefixes = set()\n count = 0\n for data in prefix_information:\n count += 1\n label_filename = data.utterance.transcription.file_info.name\n\n # using the prefix of the label file to specify the prefix\n prefix, extension = os.path.splitext(label_filename)\n cleaned_prefix = strip_unsafe_characters(prefix)\n prefixes.add(cleaned_prefix)\n\n # copy transcription to \"/label\" directory\n label_src_path = transcription_uploads_path / label_filename\n label_dest_path = base_path / \"label\" / (cleaned_prefix+extension)\n copyfile(str(label_src_path), str(label_dest_path))\n\n # copy audio to \"/wav\" directory\n audio_filename = data.utterance.audio.file_info.name\n audio_src_path = audio_uploads_path / audio_filename\n audio_dest_path = base_path / \"wav\" / (cleaned_prefix+\".wav\")\n copyfile(str(audio_src_path), str(audio_dest_path))\n\n if len(prefixes) != count:\n raise ValueError(\"Duplicate prefix found\")\n\n prefix_file_path = base_path / prefix_name\n with prefix_file_path.open(mode='w') as pf:\n for prefix in prefixes:\n pf.write(prefix)\n pf.write(os.linesep)\n return prefixes", "def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path: \n prefix = 'cuhk'\n else: \n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)", "def uniquefile(self):\n return op.join(self._basedir, self._uniquename)" ]
[ "0.69445384", "0.64533186", "0.6405879", "0.6318872", "0.5901129", "0.57555485", "0.57474875", "0.5697831", "0.55670774", "0.5564222", "0.55620843", "0.55135316", "0.54920965", "0.54119766", "0.5354059", "0.5347817", "0.5346361", "0.53412336", "0.53253436", "0.5306415", "0.52814376", "0.52794385", "0.5265606", "0.5261597", "0.5240998", "0.5231508", "0.52275914", "0.522617", "0.52258056", "0.521415" ]
0.826794
0
Read the unique IDs from in_file, but remove a self.prefix from each element of the array. For example, if the in_file is ./folder/image_1.jpg ./folder/image_2.jpg and the self.prefix is './folder/', then self.unique_ids would be written as ['image_1.jpg', 'image_2.jpg']
def read_unique_ids(self, in_file, prefix=None):
    if prefix is None:
        prefix = self.prefix
    with open(in_file) as f:
        self.unique_ids = [x.strip().replace(prefix, '') for x in f]
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_unique_ids(self, out_file):\n with open(out_file,'w') as f:\n f.writelines([self.prefix+x+'\\n' for x in self.unique_ids])\n return", "def load_uids_from_file():\n uids = io.load_file(io.UIDS_FILE)\n if not uids:\n save_uids_to_file(DEFAULT_UIDS)\n return load_uids_from_file()\n return uids", "def transform_prefix(filenames, prefix_old, prefix_new):\n\n new_filenames = set([])\n len_prefix_old = len(prefix_old)\n # loop over the list of files and remove the prefix\n for name in filenames:\n name = name[len_prefix_old:]\n new_filenames.add(prefix_new + name)\n\n\n return new_filenames", "def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()", "def rm_dup_individuals(input_prefix, output_dir, base_prefix, prefix='temp_dedups_fids'):\n full_path, pprefix = os.path.split(input_prefix)\n\n # ============= OUTPUT FILES =============\n duplicated_samples_file = os.path.join(output_dir, '{}_samples_to_rm{}.csv'.format(prefix,base_prefix))\n no_dups_plink_prefix = os.path.join(output_dir, \"{}_{}\".format(prefix, base_prefix))\n\n # ============= REMOVE DUPLICATE SAMPLES =============\n # read fam file\n fam_df = pd.read_csv(input_prefix+\".fam\", sep=\"\\s+\", names=['FID', 'IID', 'c3', 'c4', 'c5', 'c6'])\n\n assert fam_df[~(fam_df.FID == fam_df.IID)].shape[0] == 0,\\\n \"FID and IID are *not* the same in this file:\\n{}\".format(input_prefix+\".fam\")\n\n\n # identify duplicated FID&IID\n dup_index = fam_df[fam_df.duplicated(subset=['FID', 'IID'], keep='first')].index\n dup_fids = fam_df.iloc[dup_index, :].FID.unique()\n\n # each duplicate FID & IID, except for the first instance, will be have \"_[counter]\" appened\n for this_fid in dup_fids:\n for counter, index_row in enumerate(fam_df.loc[fam_df['FID'] == this_fid].iterrows()):\n index, this_row = index_row\n if counter == 0:\n continue\n else:\n fam_df.loc[index, ['FID', 'IID']] = fam_df.loc[index, [\n 'FID', 'IID']].apply(lambda x: x+\"_{}\".format(counter))\n\n # write duplicated FID and IID to file\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n fam_df.loc[dup_index, ['FID', 'IID']].to_csv(duplicated_samples_file, sep=\" \", header=None, index=None)\n\n # OVERWRITE existing .fam to tagging duplicates\n fam_df.to_csv(input_prefix+\".fam\", sep=\" \", header=None, index=None)\n\n\n # plink to rm duplicates\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n rm_dups_cmd = \"plink --bfile {} --remove {} --make-bed --out {}\".format(\n input_prefix, duplicated_samples_file, no_dups_plink_prefix)\n else:\n rm_dups_cmd = \"plink --bfile {} --make-bed --out {}\".format(input_prefix, no_dups_plink_prefix)\n\n plink_stdout = run_shell_cmd(rm_dups_cmd)\n\n return no_dups_plink_prefix, plink_stdout", "def remove_dupes(infile):\n filename = infile.replace('.csv', '-unique.csv')\n s = set()\n with open(filename, 'w') as outfile:\n for line in open(infile):\n if line not in s:\n outfile.write(line)\n s.add(line)", "def make_items_distinct_in_file(path=get_run_path(), filename_in='', sort=False,\n prefix_or_postfix=0, fix_phrase=''):\n\n set_items = set()\n\n if path[len] != '/':\n path += '/'\n\n # if filename_out:\n # out_file_path = path + filename_out\n\n if filename_in:\n in_file_path = path + filename_in\n\n with open(in_file_path) as file:\n for line in file.read().splitlines():\n 
set_items.add(line)\n else:\n for filename in glob.glob(os.path.join(path)):\n with open(filename) as file_x:\n for line in file_x:\n set_items.add(line)\n\n def make_distinct_file(file_name):\n with open(file_name) as f:\n for f_line in f.read().splitlines():\n set_items.add(f_line)\n\n def make_merged_distinct_file(dir_path):\n for file_x in glob.glob(os.path.join(dir_path)):\n with open(filename) as file_x:\n for x_line in file_x:\n set_items.add(x_line)\n\n if set_items.__len__() > 0:\n with open(out_file_path, \"w\") as file_out:\n if sort:\n list_sorted = sorted(set(set_items))\n file_out.write(\"\\n\".join(list_sorted))\n else:\n file_out.write(\"\\n\".join(set_items))", "def _read_image_ids(image_ids_path):\n return list(map(str.strip, open(image_ids_path, \"r\").readlines()))", "def remove_duplicate_royal(files_data: List) -> List[str]:\n seen_royal: List[str] = []\n ret_val: List[str] = []\n for x in files_data:\n y = x.split(DELIMITER)\n if y[USAGE_ROYAL] not in seen_royal:\n seen_royal.append(y[USAGE_ROYAL])\n ret_val.append(x)\n return ret_val", "def reset_processed_ids_file():\n src = test_config[\"processed_tweets_ids_src_path\"]\n dst = test_config[\"overwrite\"][\"processed_tweets_ids_path\"]\n copyfile(src, dst)", "def fasta_ids(fasta_files, verbose=False):\r\n all_ids = set([])\r\n for fasta_in in fasta_files:\r\n for label, seq in parse_fasta(fasta_in):\r\n rid = label.split()[0]\r\n if rid in all_ids:\r\n raise ValueError(\r\n \"Duplicate ID found in FASTA/qual file: %s\" %\r\n label)\r\n all_ids.add(rid)\r\n return all_ids", "def get_id_list(fname):\n id_list = []\n with open(fname, 'r') as f:\n for line in f:\n line = line.strip()\n if line:\n id_list.append(line)\n return id_list", "def give_unique_id_to_all_files_in_hierarchy(top_directory):\n\n file_list = get_file_list_recursively(top_directory)\n\n for file_path in file_list:\n\n # Split path to maintain absolute path and extension\n file_dir = dirname(file_path)\n _, file_ext = splitext(file_path)\n\n # Generate the new path with unique id\n file_uuid = str(uuid.uuid4())\n file_new_path = join(file_dir, file_uuid + file_ext)\n\n os.rename(file_path, file_new_path)", "def unique(fname):\n addresses = []\n with gzip.open(fname, \"rb\") as f:\n lines = f.readlines()\n for line in lines:\n #print(\"[\"+line.split()[1]+\"]\")\n if line.split()[0] not in addresses:\n addresses.append(line.split()[0])\n return addresses", "def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path:\n prefix = 'cuhk'\n else:\n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)", "def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path: \n prefix = 'cuhk'\n else: \n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)", "def remove_duplicate_crn(files_data: List) -> List[str]:\n seen_crns: List[str] = []\n ret_val: List[str] = []\n for x in files_data:\n y = x.split(DELIMITER)\n # This checks the last 5 characters of y[9] for a CRN.\n # Make sure this is where the CRN is still located before running.\n if y[9][-5:] not in seen_crns:\n seen_crns.append(y[9][-5:])\n ret_val.append(x)\n return ret_val", "def testIgnoredPrefixesDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 /src/build/foo/../file1_1.cc\nFILE 2 /src/build/bar/../file1_2.cc\nFILE 3 D:/src/build2/baz/../file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 
111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFILE 2 file1_2.cc\nFILE 3 file1_2.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n IGNORED_PREFIXES = ['\\\\src\\\\build\\\\', 'D:\\\\src\\\\build2\\\\']\n self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT)", "def __write_dupe_file(self, filename):\n sortedList = sorted(self.dupeList, key=lambda file: file[0])\n with open(filename, mode='w') as outfile:\n for size, md5, filename, ino in sortedList:\n outfile.write(\"%s %s %s %s\\n\" % (size, md5, ino, filename))", "def getsameIDList(id, file):\n glineList = []\n newread = []\n \n for line in open(file):\n itemList = line[:-1].split('\\t')\n line_id = getsubString(itemList[0],'|')\n \n if id == line_id:\n glineList.append(line)\n else:\n newread.append(line)\n return glineList", "def remove_duplicates(file):\n file_tmp = 'tmp'\n with open(file) as f, open(file_tmp, 'w') as o:\n for line in unique_everseen(f):\n o.write(line)\n # rename file_tmp to file\n os.remove(file)\n os.rename(file_tmp, file)", "def discover(self):\n ids = []\n for f in os.listdir(self.dirname):\n if self.file_prefix in f:\n ids.append(self.inv_filename(f))\n return sorted(ids)", "def get_user_ids() -> List[str]:\n listOfFiles = os.listdir('public_dataset')\n listOfFiles.remove('data_description.pdf')\n try:\n listOfFiles.remove('.DS_Store')\n except:\n pass\n return listOfFiles", "def remove_idats_not_in_samplesheet(samplesheet_filepath, sample_path):\n samples = pd.read_csv(samplesheet_filepath)\n all_idats = list(Path(sample_path).rglob('*.idat')) + list(Path(sample_path).rglob('*.idat.gz'))\n all_idats_names = [i.name for i in all_idats]\n # these are VALID idats to retain\n save_list = []\n try:\n idat_fileparts = [f\"{row['GSM_ID']}_{row['Sentrix_ID']}_{row['Sentrix_Position']}\" for (idx,row) in samples.iterrows()]\n except KeyError as e:\n LOGGER.error(f\"Samplesheet is missing {e}.\")\n return \n for file in idat_fileparts:\n files = [f\"{file}_Grn.idat\", f\"{file}_Grn.idat.gz\", f\"{file}_Red.idat\", f\"{file}_Red.idat.gz\"]\n for idat in files:\n if idat in all_idats_names:\n save_list.append(idat)\n #files = [f\"{file}_Grn.idat\", f\"{file}_Grn.idat.gz\", f\"{file}_Red.idat\", f\"{file}_Red.idat.gz\"]\n #if Path(idat).exists():\n remove_list = [idat for idat in all_idats if idat.name not in save_list]\n #LOGGER.info(f\"removing {len(remove_list)} idats out of a total of {len(all_idats)} found,\")\n worked = 'OK' if len(samples.index) == len(save_list)/2 else 'ERROR'\n if worked != 'OK':\n return\n removed = 0\n for idat in all_idats:\n if idat.name in save_list:\n continue\n if Path(idat).exists():\n Path(idat).unlink()\n #print('-',idat)\n removed += 1\n #LOGGER.info(f'removed {removed} idat files not in samplesheet. ready to process remaining ones.')\n LOGGER.info(f\"retaining {len(save_list)} files for {len(samples.index)} samples ({worked}). 
(Dropped {len(remove_list)} idats)\")", "def updateImportList(self):\n\t\tids_new = self.ids\n\t\tids_list = []\n\n\t\tif not os.path.exists(ids_file):\n\t\t\twith open(ids_file, 'a') as ids:\n\t\t\t\tpass\n\n\t\twith open(ids_file, \"rb\") as ids:\n\t\t\tids_reader = csv.reader(ids)\n\t\t\tfor row in ids_reader:\n\t\t\t\tids_list.append(row[0])\n\n\t\tids_new = set(ids_new) - set(ids_list)\n\n\t\twith open(ids_file, \"a\") as ids:\n\t\t\tids_writer = csv.writer(ids)\n\t\t\tfor idx in ids_new:\n\t\t\t\tids_writer.writerow( str(idx) )\n\n\t\tself.ids = ids_new", "def uniq(args):\n p = OptionParser(uniq.__doc__)\n p.add_option(\n \"--seq\", default=False, action=\"store_true\", help=\"Uniqify the sequences\"\n )\n p.add_option(\n \"-t\",\n \"--trimname\",\n dest=\"trimname\",\n action=\"store_true\",\n default=False,\n help=\"turn on the defline trim to first space\",\n )\n\n opts, args = p.parse_args(args)\n if len(args) != 2:\n sys.exit(p.print_help())\n\n fastafile, uniqfastafile = args\n fw = must_open(uniqfastafile, \"w\")\n seq = opts.seq\n\n for rec in _uniq_rec(fastafile, seq=seq):\n if opts.trimname:\n rec.description = \"\"\n SeqIO.write([rec], fw, \"fasta\")", "def unique_id(graph: nx.MultiDiGraph, prefix: str = \"\"):\n # TODO thread safety?\n unique_id.count = max(unique_id.count, graph.number_of_nodes()) + 1\n if prefix and not graph.has_node(prefix):\n return str(prefix)\n while graph.has_node(prefix + str(unique_id.count)):\n unique_id.count += 1\n return prefix + str(unique_id.count)", "def remove_prefix(self, path):\n self.log.debug(\n f\"S3FS.remove_prefix: self.prefix_: {self.prefix_} path: {path}\"\n )\n if isinstance(path, str):\n path = (\n path[len(self.prefix_) :]\n if path.startswith(self.prefix_)\n else path\n )\n path = path[1:] if path.startswith(self.delimiter) else path\n return path\n if isinstance(path, (list, tuple)):\n path = [\n p[len(self.prefix_) :] if p.startswith(self.prefix_) else p\n for p in path\n ]\n path = [p[1:] if p.startswith(self.delimiter) else p for p in path]\n return path", "def main():\n \"\"\"Removes the common prefix from each filename.\"\"\"\n \"\"\"Writes a new file with the stripped filenames.\"\"\"\n parser = OptionParser(usage='%prog [options] infile outfile')\n parser.add_option('-f', '--force', action='store_true', default=False, help='overwrite current outfile, if exists')\n\n # check inputs\n options, args = parser.parse_args() \n if len(args) != 2: parser.error('wrong number of positional arguments') \n\n infile = args[0]\n outfile = args[1]\n\n if exists(outfile) and not(options.force): \n print >>sys.stderr, 'Target %s already exists.' % outfile\n print >>sys.stderr, 'Use --force to overwrite.'\n sys.exit(1)\n\n if not(exists(infile)):\n print >>sys.stderr, 'File %s not found.' 
% infile \n sys.exit(1)\n\n infieldnames = ['filename', 'procname', 'lineno'] \n outfieldnames = ['filename', 'lineno']\n\n # read file\n instream = open(infile)\n reader = DictReader(instream, fieldnames=infieldnames)\n entries = list(reader) \n instream.close()\n\n # process entries\n fnames = map(lambda d: d['filename'], entries) \n prefix = commonprefix(fnames)\n\n # if there is only one file, the common prefix will include the filename \n # however, in the output we want to preserve the filename\n prefix, tail = split(prefix)\n\n for e in entries: \n tails = e['filename'].split(prefix) \n if not(tails[0] == ''): \n print >>sys.stderr, 'This prefix is uncommon!'\n sys.exit(1) \n e['filename'] = (tails[1].split('/'))[1] \n\n # print results\n outstream = open(outfile, 'w')\n writer = DictWriter(outstream, outfieldnames, extrasaction='ignore', lineterminator='\\n')\n writer.writerows(entries)\n outstream.close()", "def unique_files(self):\n self._tempfiles[-1].ctr = -1" ]
[ "0.6857764", "0.59046537", "0.5876727", "0.5724553", "0.5643143", "0.560151", "0.5585564", "0.55011547", "0.54623485", "0.5456302", "0.5420213", "0.53884214", "0.5385816", "0.5367136", "0.53472537", "0.53333265", "0.5323998", "0.52856857", "0.52816784", "0.5269871", "0.5257524", "0.5235414", "0.5215447", "0.5201493", "0.5197029", "0.5183324", "0.5176355", "0.5161927", "0.51458013", "0.5142387" ]
0.88484704
0
Randomly select images from the CCD dataset to be included in the experiments. Make sure that there are at least CAP number of images in each intersection for age, gender, lighting condition, and skin groups.
def select_unique_ids(self):
    ccd = self.metadata
    ccd_ids = []
    for dg in set(ccd['isDark']):
        for gg in set(ccd['Gender']):
            for sg in set(ccd['Skin']):
                for ag in set(ccd['Age']):
                    try:
                        intersection_ids = list(ccd[np.logical_and(ccd['isDark'] == dg,
                                                    np.logical_and(ccd['Gender'] == gg,
                                                    np.logical_and(ccd['Skin'] == sg,
                                                                   ccd['Age'] == ag)))]['ImageID'])
                        if len(intersection_ids) <= CAP:
                            ccd_ids += intersection_ids
                        else:
                            x = list(np.random.choice(intersection_ids, CAP, replace=False))
                            ccd_ids += x
                    except:
                        continue
    self.unique_ids = ccd_ids
    return ccd_ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomly_select_images():\r\n global images_a, images_b, images_total\r\n images_a = random.sample(images_a, int(number_of_images_a.get()))\r\n if number_of_images_b.get() != \"\": #check if images_b empty\r\n images_b = random.sample(images_b, int(number_of_images_b.get()))\r\n else:\r\n images_b = []\r\n images_total = images_a + images_b\r\n random.shuffle(images_total)", "def sample_images(images, n=1000):\n ix = np.random.choice(np.arange(len(images)), size=n, replace=False)\n sample = labels.loc[ix, [\"img_name\", \"breed\"]]\n assert len(sample) == n\n return sample", "def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict", "def random_sample_images(self, images, sample_size):\n\n #return sample(images, int(sample_size))\n return images.order_by('?')[:sample_size]", "def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def select_unique_ids(self):\n miap = 
self.metadata\n miap_single = miap[miap.ImageID.isin(list(miap_single[miap_single == 1].index))]\n miap_ids = []\n for gp in set(miap_single['GenderPresentation']):\n for ap in set(miap_single['AgePresentation']):\n try:\n intersection_ids = list(miap_single[np.logical_and(miap_single['GenderPresentation'] == gp,\n miap_single['AgePresentation'] == ap)]['ImageID'])\n if group[gp][ap] <= CAP:\n miap_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n miap_ids += x\n\n except:\n continue\n self.unique_ids = miap_ids\n return miap_ids", "def take(self, n, wordnetid=None):\n subjectid = np.random.choice(self.subjects(), n) if wordnetid is None else [wordnetid] * n\n takelist = []\n for s in subjectid:\n d = os.path.join(self.datadir, s)\n f = np.random.choice(imlist(d),1)[0]\n im = ImageDetection(filename=f).category(filebase(d))\n takelist.append(im)\n return takelist", "def test_load_selections2(self, selection):\n self.image_set._subsets = []\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )", "def pick_categories(path, number_of_categories):\r\n all_categories = os.listdir(path)\r\n # from all categories choose 'number_of_categories' many\r\n chosen_categories = random.sample(all_categories, number_of_categories)\r\n \r\n # output, which categories are being used\r\n print(\"Categories:\")\r\n for i, category in enumerate(chosen_categories):\r\n category_name = category.split(\".\")\r\n print(f\"category number {(i)} is {category_name[1]}\")\r\n print(\"\\n\")\r\n \r\n # to have a balanced amount of images choose the category with the least amount of images as upper threshold\r\n number_of_images = min([len(os.listdir(os.path.join(path, category))) for category in chosen_categories])\r\n return chosen_categories, number_of_images", "def sample_images(opt, batches_done, monet_dataloader, photo_dataloader):\n G_AB.eval()\n G_BA.eval()\n real_A = next(iter(monet_dataloader))[0].cuda()\n fake_B = G_AB(real_A)\n real_B = next(iter(photo_dataloader))[0].cuda()\n fake_A = G_BA(real_B)\n # Arange images along x-axis\n real_A = make_grid(real_A, nrow=opt.batch_size, normalize=True)\n real_B = make_grid(real_B, nrow=opt.batch_size, normalize=True)\n fake_A = make_grid(fake_A, nrow=opt.batch_size, normalize=True)\n fake_B = make_grid(fake_B, nrow=opt.batch_size, normalize=True)\n # Arange images along y-axis\n image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)\n save_image(image_grid, \"%s/%s/images/%s.png\" % (opt.checkpoints_dir, opt.name, batches_done), normalize=False)", "def random_image():\n\n # select random photo from sample table\n result = db.engine.execute(\"\"\"SELECT photo_id\n FROM sample\n ORDER BY rand() LIMIT 1\"\"\")\n photo_id = result.first()[0]\n\n # extract classification vector from database\n class_columns = \",\".join(\"Label{}\".format(i) for i in range(num_labels))\n result = db.engine.execute(\"\"\"SELECT yfcc.download_url, {}\n FROM placesCNN INNER JOIN yfcc\n ON placesCNN.photo_id = yfcc.photo_id\n WHERE yfcc.photo_id = {}\"\"\".format(class_columns,\n photo_id))\n\n row = result.first()\n download_url = row[0]\n classification = np.array(row[1:])\n\n return {\"suggested_tags\": predicted_tags(classification),\n \"classification_vector\": classification,\n \"image_url\": download_url}", "def test_load_selections3(self, selection):\n 
self.image_set.create_subset()\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n )", "def take_per_subject(self, n):\n subjectid = self.subjects()\n takelist = []\n for s in subjectid:\n d = os.path.join(self.datadir, s)\n for k in range(0,n):\n f = np.random.choice(imlist(d),1)[0]\n im = ImageDetection(filename=f).category(filebase(d))\n takelist.append(im)\n return takelist", "def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def choose_pair():\n print('This program is creating hybrid images from a given pair of images.\\n'\\\n 'Choose a pair of images you would like to make a hybrid image from:\\n'\\\n '1. bicycle + motorcycle\\n'\\\n '2. dog + cat\\n'\\\n '3. Marylin Monroe + Albert Einstein\\n'\\\n '4. bird + plane\\n'\\\n '5. fish + submarine\\n'\\\n '6. eye + snail\\n'\\\n '7. kitten + moon')\n path = '/Users/frank/PycharmProjects/hybrid-images'\n path = path + '/' + 'pictures'\n choice = int(input('Enter a number from 1 to 7: \\n'))\n while choice not in range(1,8):\n choice = int(input('The value you entered is invalid, try again: /n')) \n if choice == 1: \n img1 = cv2.imread(path + '/' + 'motorcycle.bmp')\n img2 = cv2.imread(path + '/' + 'bicycle.bmp')\n h1 = w1 = h2 = w2 = 11\n s1 = s2 = 2\n elif choice == 2: \n img1 = cv2.imread(path + '/' + 'dog.bmp')\n img2 = cv2.imread(path + '/' + 'cat.bmp')\n h1 = w1 = h2 = w2 = 21\n s1 = s2 = 7\n elif choice == 3:\n img1 = cv2.imread(path + '/' + 'marilyn.bmp')\n img2 = cv2.imread(path + '/' + 'einstein.bmp')\n h1 = w1 = 23\n h2 = w2 = 11\n s1 = 4\n s2 = 2\n elif choice == 4:\n img1 = cv2.imread(path + '/' + 'bird.bmp')\n img2 = cv2.imread(path + '/' + 'plane.bmp')\n h1 = w1 = 6\n h2 = w2 = 21\n s1 = 2\n s2 = 8\n elif choice == 5:\n img1 = cv2.imread(path + '/' + 'submarine.bmp')\n img2 = cv2.imread(path + '/' + 'fish.bmp')\n h1 = w1 = 29\n h2 = w2 = 17\n s1 = 5\n s2 = 3\n elif choice == 6:\n img1 = cv2.imread(path + '/' + 'snail.png')\n img2 = cv2.imread(path + '/' + 'eye.png')\n h1 = w1 = 16\n h2 = w2 = 25\n s1 = 4\n s2 = 10\n elif choice == 7:\n img1 = cv2.imread(path + '/' + 'moon.png')\n img2 = cv2.imread(path + '/' + 'kitten.png')\n h1 = w1 = 5\n h2 = w2 = 21\n s1 = 1\n s2 = 5\n #convert the images into floating points data type \n img1 = img1.astype('float64')\n img2 = img2.astype('float64')\n param = [h1,w1,s1,h2,w2,s2]\n #change parameters to user chosen if such were declared in parameters module \n for i in range(len(param)):\n if parameters.values[i] != 0:\n param[i] = parameters.values[i]\n #create Gaussian filters for each image\n kernel1 = create_gaussian_filter((param[0], param[1]), param[2])\n kernel2 = create_gaussian_filter((param[3], param[4]), param[5]) \n return img1, img2, kernel1, kernel2", "def 
select_unique_ids(self):\n adience = self.metadata\n adi_ids = []\n for gg in set(adience['gender']):\n for ag in set(adience['age_group']):\n try:\n idx = np.logical_and(adience['gender'] == gg,adience['age_group'] == ag)\n intersection_ids = list(adience[idx]['user_id'] +\n '/coarse_tilt_aligned_face.' +\n adience[idx]['face_id'].astype(str) +\n '.' + adience[idx]['original_image'])\n if len(intersection_ids) <= CAP:\n adi_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n adi_ids += x\n\n except:\n continue\n self.unique_ids = adi_ids\n return adi_ids", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def crop_images_color(dataset_dir, is_mask=True):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*_labelIds.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n if is_mask:\n mask = np.ones((img.shape[0], img.shape[1]), dtype=np.uint8) * 255\n\n idx_person = np.where(np.all(img == [220, 20, 60, 255], axis=-1))\n #idx_rider = np.where(np.all(img == [255, 0, 0, 255], axis=-1))\n #idx_void = np.where(np.all(img == [0, 0, 0, 255], axis=-1))\n\n #indices = np.concatenate((idx_person, idx_rider, idx_void), axis=1)\n indices = idx_person\n # mask[indices[0], indices[1], :] = (0, 0, 0, 255)\n mask[indices[0], indices[1]] = 0\n mask = np.reshape(mask, (256, 512))\n\n #scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/CITYSCAPES_crop_random/' + filePath.split('/')[-1],\n # img[offs_h[index]:offs_h_end[index], offs_w[index]:offs_w_end[index] :])\n scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/for_wonderful_chou/image/' + filePath.split('/')[-1],\n img[0:192, :])\n #break", "def images_pca(images_folder, limit=100, k=3):\n my_images = []\n shape = None\n files = os.listdir(images_folder)\n random.shuffle(files)\n files = files[:limit]\n for study_file in files:\n assert study_file.endswith('.pkl'), 'file %s has wrong extension' % study_file\n with open(os.path.join(images_folder, study_file), 'rb') as f:\n study = pickle.load(f)\n for slice_ in study['sax']:\n myframe = random.choice(study['sax'][slice_])\n assert shape is None or shape == myframe['pixel'].shape, 'inconsistent image shapes'\n shape = myframe['pixel'].shape\n my_images.append(myframe['pixel'])\n\n X = np.zeros((len(my_images), my_images[0].size))\n for i, img in enumerate(my_images):\n X[i] = img.reshape(img.size)\n\n V, eig = pca(X)\n V = V.reshape((k, shape[0], shape[1]))\n return V, eig", "def getRandomImage(path):\n folders = list(filter(lambda x: os.path.isdir(os.path.join(path, x)), os.listdir(path)))\n random_directory = np.random.randint(0,len(folders))\n path_class = folders[random_directory]\n print(\"Class - \" + five_celeb_dict_n[str(path_class)])\n file_path = path + path_class\n file_names = [f for f in listdir(file_path) if isfile(join(file_path, f))]\n random_file_index = np.random.randint(0,len(file_names))\n image_name = file_names[random_file_index]\n return cv2.imread(file_path+\"/\"+image_name)", "def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... 
This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images", "def select_unique_ids(self):\n utk = self.metadata\n utk_ids = []\n for gg in set(utk['gender']):\n for rg in set(utk['race']):\n for ag in set(utk['age']):\n try:\n intersection_ids = list(utk[np.logical_and(utk['gender'] == gg,\n np.logical_and(utk['race'] == rg,\n utk['age'] == ag))]['filename'])\n if len(intersection_ids) <= CAP:\n utk_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n utk_ids += x\n\n except:\n continue\n self.unique_ids = utk_ids\n return utk_ids", "def extract_images(dimension = (_HEIGHT, _WIDTH), n = 100, color = True, include = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13']):\n # establish directory routes\n origin = '/Users/jaoming/Active Projects/Shopee Challenge/shopee-product-detection-dataset'\n main_train_folder = '/Users/jaoming/Active Projects/Shopee Challenge/shopee-product-detection-dataset/train/train'\n os.chdir(main_train_folder)\n if color:\n imread_color = cv2.IMREAD_COLOR\n else:\n imread_color = cv2.IMREAD_GRAYSCALE\n\n # setting up the variables \n data, labels = [], []\n for name in include:\n os.chdir(name)\n image_namelist = os.listdir()\n if '.DS_Store' in image_namelist: # removing unnecessary files\n image_namelist.remove('.DS_Store')\n count = 0\n while count < n:\n data.append(cv2.resize(\n cv2.imread(image_namelist[count], imread_color),\n dimension,\n interpolation = cv2.INTER_CUBIC\n ))\n labels.append(int(name))\n count += 1\n os.chdir(main_train_folder)\n\n os.chdir(origin)\n return data, labels", "def selected_dataset(train_ratio = 0.9, defect_ratio = 1, rescale = True, \\\r\n\tshuffle = True, set_defect_ratio = True, data_type = 'type1', used_pickle = True):\r\n\tpkl_file_root = 'selected_dataset'\r\n\tif used_pickle:\r\n\t\ttry:\r\n\t\t\timages = load_from_pickle('train_images', pkl_file_root)\r\n\t\t\tlabels = load_from_pickle('train_labels', pkl_file_root)\r\n\t\t\tindex = load_from_pickle('specific_indices', pkl_file_root)\r\n\t\t\tnd_img = 
load_from_pickle('not_defect_image_for_test', pkl_file_root)\r\n\t\texcept:\r\n\t\t\tprint('pickle file not exist, data prosessing..')\r\n\t\t\timages, labels, index, nd_img = selected_dataset(train_ratio, defect_ratio, rescale, shuffle, set_defect_ratio, data_type, False)\r\n\r\n\telse:\r\n\t\ttype1_img, type1_label, type2_img, type2_label = load_data_all()\r\n\t\tif data_type == 'type1':\r\n\t\t\ttype1_imgs, type1_labels = type1_makeup(type1_img, type1_label, v1 = 1000, v2 = 160, masking = True)\r\n\t\t\ttype1 = split_train_test(type1_imgs, type1_labels, train_ratio = train_ratio, shuffle = shuffle)\r\n\t\t\t# again\r\n\t\t\ttrain_img, train_label = type1_makeup(type1['train_img'], type1['train_label'], v1 = 100, masking = False)\r\n\t\t\ttest_img, test_label = type1_makeup(type1['test_img'], type1['test_label'], v1 = 100, masking = False)\r\n\t\t\t\r\n\t\telif data_type == 'type2':\r\n\t\t\ttype2_imgs, type2_labels = type2_makeup(type2_img, type2_label, v1 = 1250, v2 = 55, masking = True)\r\n\t\t\ttype2 = split_train_test(type2_imgs, type2_labels, train_ratio = train_ratio, shuffle = shuffle)\r\n\r\n\t\t\ttrain_img, train_label = type2_makeup(type2['train_img'], type2['train_label'], v1 = 100, masking = False)\r\n\t\t\ttest_img, test_label = type2_makeup(type2['test_img'], type2['test_label'], v1 = 100, masking = False)\r\n\r\n\t\telse:\r\n\t\t\traise ValueError('..')\r\n\r\n\t\t# find not defect surface in sub-set\r\n\t\tnd_img, nd_lb = find_contain_target(test_img, test_label, target = 'background')\r\n\r\n\t\tif set_defect_ratio:\r\n\t\t\t# if only_target was false, (background : defect)ratio set to same ratio or func's argument ratio.\r\n\t\t\tdefect_img, defect_label = find_contain_target(train_img, train_label, 'defect')\r\n\t\t\tn_defect = len(defect_img)\r\n\t\t\tuse_n_image = int(n_defect * defect_ratio)\r\n\t\t\t#print(' # of contain target, ', n_defect) \r\n\t\t\tback_img, back_label = find_contain_target(train_img, train_label, 'background')\r\n\t\t\tn_back_images = len(back_img)\r\n\r\n\t\t\tif n_back_images < (n_defect + use_n_image):\r\n\t\t\t\traise ValueError('ratio error')\r\n\r\n\t\t\trandom_index = np.random.choice((len(back_img)-use_n_image), 1)[0]\r\n\t\t\tback_img = back_img[random_index:random_index+use_n_image]\r\n\t\t\tback_label = back_label[random_index:random_index+use_n_image]\r\n\r\n\t\t\timages = np.concatenate((defect_img, back_img), axis = 0)\r\n\t\t\tlabels = np.concatenate((defect_label, back_label), axis = 0)\r\n\t\telse:\r\n\t\t\timages = train_img\r\n\t\t\tlabels = train_label\r\n\r\n\t\tconcat_images = np.concatenate((images, nd_img), axis = 0)\r\n\t\tconcat_labels = np.concatenate((labels, nd_lb), axis = 0)\r\n\r\n\t\timages, labels, index = shuffle_with_sameindex_img_label(concat_images, concat_labels, memorial_n = nd_img.shape[0]) \r\n\r\n\t\tif rescale:\r\n\t\t\timages = images / 255\r\n\t\t\tnd_img = nd_img / 255\r\n\t\timages = np.expand_dims(images, axis = 3)\r\n\t\tlabels = np.expand_dims(labels, axis = 3)\r\n\t\tnd_img = np.expand_dims(nd_img, axis = 3)\r\n\t\tprint(' >> images set shape : ', images.shape)\r\n\r\n\t\tarray_save_to_pickle(images, 'train_images', pkl_file_root)\r\n\t\tarray_save_to_pickle(labels, 'train_labels', pkl_file_root)\r\n\t\tarray_save_to_pickle(index, 'specific_indices', pkl_file_root)\r\n\t\tarray_save_to_pickle(nd_img, 'not_defect_image_for_test', pkl_file_root)\r\n\t\tnd_img = np.squeeze(nd_img, axis = -1)\r\n\t\tfor i in range(len(nd_img)):\r\n\t\t\tnd_img_name = 'nd_img' + str(i)\r\n\t\t\timg_save(nd_img[i], 
nd_img_name, rescale = True, mode = 'L', root = 'GAN_TEST_SAMPLE')\r\n\r\n\treturn images, labels, index, nd_img", "def generate_trial(trialset, synset2img, trialtype, num_imgs):\n # randomly shuffle the sets.\n for s in trialset:\n random.shuffle(s)\n source = trialset[trialtype]\n # sample images\n # make sure we have the most specific guy\n src_imgs = [random.choice(synset2img[trialset[0][0]])]\n for i in range(num_imgs - 1):\n synset = random.choice(source)\n src_imgs.append(random.choice(synset2img[synset]))\n target_imgs = []\n # target imgs are sampled in a structured way\n # 12 images in domain\n for i in range(4):\n for j in range(3):\n synset = random.choice(trialset[i])\n target_imgs.append(random.choice(synset2img[synset]))\n # 12 images outside the domain\n for i in range(12):\n synset = random.choice(trialset[-1])\n target_imgs.append(random.choice(synset2img[synset]))\n # shuffling the images to minimize the ordering effect\n random.shuffle(src_imgs)\n random.shuffle(target_imgs)\n return src_imgs, target_imgs", "def training_batch_selection(train_set_size, input_img):\n\n input_dims = input_img.shape\n all_data_indices = np.arange(input_dims[0]*input_dims[1])\n all_data_indices = all_data_indices.reshape(input_dims[:-1])\n\n conf = get_config()\n inside = int(np.floor(conf[\"inside_part\"]))\n outside = int(np.floor(conf[\"outside_part\"]))\n cx = ii.image.c_x\n cy = ii.image.c_y\n\n # Find the position of the crop in the image and determine part least affected by radial distortion\n center_x = (conf[\"crop\"][\"left_top\"]['x'] + conf[\"crop\"][\"size\"][\"height\"]/2)*2/ii.image.height-1.\n center_y = (conf[\"crop\"][\"left_top\"]['y'] + conf[\"crop\"][\"size\"][\"width\"]/2)*2/ii.image.width-1.\n\n left_right_center = max(min((center_y-cy)*2, 1), -1)\n top_bot_center = max(min((center_x - cx) * 2, 1), -1)\n\n # DEBUG: set your own center\n # left_right_center = 0\n # top_bot_center = 0\n\n from_x = int(round(inside*(1.-top_bot_center)))\n to_x = min(-int(round(inside*(1.+top_bot_center))), -1)\n from_y = int(round(inside*(1.-left_right_center)))\n to_y = min(-int(round(inside*(1.+left_right_center))), -1)\n\n # Exclude part that is minimally affected by radial distortion\n selection_exclude = all_data_indices[from_x:to_x, from_y:to_y]\n selection_exclude = selection_exclude.reshape(-1)\n\n # Exclude outer border in order to avoid index out of bounds\n selection_include = all_data_indices[outside:-outside,\n outside:-outside]\n selection_include = selection_include.reshape(-1)\n\n selection = [x for x in selection_include if x not in selection_exclude]\n selection = np.random.permutation(selection)\n\n # DEBUG: forcing larger training set\n train_set_size = int(train_set_size*2)\n\n selection = selection[:train_set_size]\n\n # DEBUG: display image region for selection\n image = input_img.reshape(-1)\n image[:] = 0\n image[selection_include] = input_img.reshape(-1)[selection_include]\n image[selection_exclude] = 0\n image = image.reshape(input_dims[0], input_dims[1])\n Verbose.imshow(image, Verbose.debug)\n\n return selection", "def test_load_selections(self, selection):\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n )", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert 
results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def stratified_sample_images(self, images, sample_size):\n\n images.order_by('deployment', 'date_time')\n every_nth = images.count()/int(sample_size)\n sampled_images = images[0:images.count():every_nth]\n\n return sampled_images" ]
[ "0.6372832", "0.5899854", "0.58285266", "0.58255917", "0.5720729", "0.5693634", "0.5607028", "0.55150485", "0.5467195", "0.5396391", "0.5383312", "0.5382062", "0.53393257", "0.53032136", "0.5301093", "0.5289086", "0.52777845", "0.5268686", "0.52449733", "0.523275", "0.5193581", "0.5192082", "0.5191008", "0.5187661", "0.5187409", "0.51847035", "0.51844275", "0.5162847", "0.51618844", "0.51585066" ]
0.65822405
0
First select only those MIAP images that contain exactly one object. Then randomly select images to include in the experiments, keeping at most CAP images in each age and gender intersection group.
def select_unique_ids(self):
    miap = self.metadata
    # number of annotated people per image; keep only images with exactly one
    miap_single = miap['ImageID'].value_counts()
    miap_single = miap[miap.ImageID.isin(list(miap_single[miap_single == 1].index))]
    miap_ids = []
    for gp in set(miap_single['GenderPresentation']):
        for ap in set(miap_single['AgePresentation']):
            try:
                intersection_ids = list(miap_single[np.logical_and(miap_single['GenderPresentation'] == gp,
                                                                   miap_single['AgePresentation'] == ap)]['ImageID'])
                # keep the whole intersection when it is small, otherwise sample CAP images from it
                if len(intersection_ids) <= CAP:
                    miap_ids += intersection_ids
                else:
                    x = list(np.random.choice(intersection_ids, CAP, replace=False))
                    miap_ids += x
            except:
                continue
    self.unique_ids = miap_ids
    return miap_ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_unique_ids(self):\n ccd = self.metadata\n ccd_ids = []\n for dg in set(ccd['isDark']):\n for gg in set(ccd['Gender']):\n for sg in set(ccd['Skin']):\n for ag in set(ccd['Age']):\n try:\n intersection_ids = list(ccd[np.logical_and(ccd['isDark'] == dg,\n np.logical_and(ccd['Gender'] == gg,\n np.logical_and(ccd['Skin'] == sg,\n ccd['Age'] == ag)))]['ImageID'])\n if len(intersection_ids) <= CAP:\n ccd_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n ccd_ids += x\n\n except:\n continue\n self.unique_ids = ccd_ids\n return ccd_ids", "def randomly_select_images():\r\n global images_a, images_b, images_total\r\n images_a = random.sample(images_a, int(number_of_images_a.get()))\r\n if number_of_images_b.get() != \"\": #check if images_b empty\r\n images_b = random.sample(images_b, int(number_of_images_b.get()))\r\n else:\r\n images_b = []\r\n images_total = images_a + images_b\r\n random.shuffle(images_total)", "def select_unique_ids(self):\n adience = self.metadata\n adi_ids = []\n for gg in set(adience['gender']):\n for ag in set(adience['age_group']):\n try:\n idx = np.logical_and(adience['gender'] == gg,adience['age_group'] == ag)\n intersection_ids = list(adience[idx]['user_id'] +\n '/coarse_tilt_aligned_face.' +\n adience[idx]['face_id'].astype(str) +\n '.' + adience[idx]['original_image'])\n if len(intersection_ids) <= CAP:\n adi_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n adi_ids += x\n\n except:\n continue\n self.unique_ids = adi_ids\n return adi_ids", "def select_unique_ids(self):\n utk = self.metadata\n utk_ids = []\n for gg in set(utk['gender']):\n for rg in set(utk['race']):\n for ag in set(utk['age']):\n try:\n intersection_ids = list(utk[np.logical_and(utk['gender'] == gg,\n np.logical_and(utk['race'] == rg,\n utk['age'] == ag))]['filename'])\n if len(intersection_ids) <= CAP:\n utk_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n utk_ids += x\n\n except:\n continue\n self.unique_ids = utk_ids\n return utk_ids", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < 
patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def sample_images(opt, batches_done, monet_dataloader, photo_dataloader):\n G_AB.eval()\n G_BA.eval()\n real_A = next(iter(monet_dataloader))[0].cuda()\n fake_B = G_AB(real_A)\n real_B = next(iter(photo_dataloader))[0].cuda()\n fake_A = G_BA(real_B)\n # Arange images along x-axis\n real_A = make_grid(real_A, nrow=opt.batch_size, normalize=True)\n real_B = make_grid(real_B, nrow=opt.batch_size, normalize=True)\n fake_A = make_grid(fake_A, nrow=opt.batch_size, normalize=True)\n fake_B = make_grid(fake_B, nrow=opt.batch_size, normalize=True)\n # Arange images along y-axis\n image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)\n save_image(image_grid, \"%s/%s/images/%s.png\" % (opt.checkpoints_dir, opt.name, batches_done), normalize=False)", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def load_dataset(image_home, mask_home, patient_list, \n size = 
512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def perform_augmentations(image, gt_image, augmentations, probabilities):\n for i in range(len(augmentations)):\n if np.random.rand(1) < probabilities[i]:\n image, gt_image = augmentations[i](image, gt_image)\n\n return image, gt_image", "def sample_images(images, n=1000):\n ix = np.random.choice(np.arange(len(images)), size=n, replace=False)\n sample = labels.loc[ix, [\"img_name\", \"breed\"]]\n assert len(sample) == n\n return sample", "def parse(self):\n imset = []\n imdir = remkdir(os.path.join(self._datadir, 'images'))\n csv_actors = readcsv(os.path.join(self._datadir, 'facescrub_actors.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actors[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'male'}))\n\n csv_actresses = readcsv(os.path.join(self._datadir, 'facescrub_actresses.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actresses[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'female'}))\n\n return imset", "def get_relevant_images_norank(img_lst, img_map, indices, k,operation=\"union\"):\n set_lst = []\n for img in img_lst:\n set_lst.append(set(get_similar_imgs(img, img_map, indices, k)))\n if operation == \"union\":\n return random.shuffle(list(set.union(*set_lst)))\n if operation == \"intersection\":\n return random.shuffleo(list(set.intersection(*set_lst)))", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n\n # print(gt_boxes)\n # fang[-1] ok\n\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n all_rois[:, 1:5].data,\n gt_boxes[:, :4].data)\n max_overlaps, gt_assignment = overlaps.max(1)\n labels = gt_boxes[gt_assignment, [4]]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = (max_overlaps >= 
cfg.TRAIN.FG_THRESH).nonzero().view(-1)\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = ((max_overlaps < cfg.TRAIN.BG_THRESH_HI) + (max_overlaps >= cfg.TRAIN.BG_THRESH_LO) == 2).nonzero().view(-1)\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.numel() > 0 and bg_inds.numel() > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.numel())\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(fg_rois_per_image), replace=False)).long().cuda()]\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.numel() < bg_rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_rois_per_image), replace=to_replace)).long().cuda()]\n elif fg_inds.numel() > 0:\n to_replace = fg_inds.numel() < rois_per_image\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = rois_per_image\n elif bg_inds.numel() > 0:\n to_replace = bg_inds.numel() < rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = torch.cat([fg_inds, bg_inds], 0)\n \n # Select sampled values from various arrays:\n labels = labels[keep_inds].contiguous()\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n # print(int(fg_rois_per_image)) -> 16\n\n rois = all_rois[keep_inds].contiguous()\n roi_scores = all_scores[keep_inds].contiguous()\n\n\n\n bbox_target_data, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, \\\n back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data\\\n = _compute_targets(rois[:, 1:5].data, gt_boxes[gt_assignment[keep_inds]][:, :4].data, labels.data,\\\n gt_boxes[gt_assignment[keep_inds]][:, 5:9].data, gt_boxes[gt_assignment[keep_inds]][:, 9:13].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 13:15].data, gt_boxes[gt_assignment[keep_inds]][:, 15:19].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 19:23].data, gt_boxes[gt_assignment[keep_inds]][:, 23:25].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 25:27].data)\n\n bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights \\\n = _get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, \\\n front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data)\n \n \n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def __get_multi_images_ids(self, num_images=0): \n availability_images = imageInstance()\n images = availability_images.get_images()\n images_ids = []\n for image in images:\n if image.type == 'machine':\n 
images_ids.append( image.id.encode(\"latin-1\") )\n if num_images>1:\n random.shuffle(images_ids)\n return images_ids[:num_images]\n return images_ids", "def random_sample_images(self, images, sample_size):\n\n #return sample(images, int(sample_size))\n return images.order_by('?')[:sample_size]", "def take(self, n, wordnetid=None):\n subjectid = np.random.choice(self.subjects(), n) if wordnetid is None else [wordnetid] * n\n takelist = []\n for s in subjectid:\n d = os.path.join(self.datadir, s)\n f = np.random.choice(imlist(d),1)[0]\n im = ImageDetection(filename=f).category(filebase(d))\n takelist.append(im)\n return takelist", "def testing_on_new_data(directory=parentDir + '/photos-to-categorize'):\n\n known_face_encodings={}\n with open(parentDir + '/data/face_locations.csv', 'r') as csvfile:\n fieldnames=['person', 'fullfilename' , 'top', 'right', 'bottom', 'left']\n reader = csv.DictReader(csvfile,fieldnames=fieldnames)\n for row in reader:\n face_location = [int(row['top']), int(row['right']), int(row['bottom']), int(row['left'])]\n if row['person'] in known_face_encodings:\n known_face_encodings[row['person']] = np.append(known_face_encodings[row['person']],np.array(face_encodings(load_image_file(row['fullfilename']),[face_location])),axis=0)\n else:\n known_face_encodings.update({row['person']:np.array(face_encodings(load_image_file(row['fullfilename']),[face_location]))})\n \n for filename in os.listdir(directory):\n img = load_image_file(directory + '/' + filename)\n \n all_face_locations = face_locations(img)\n for face_location in all_face_locations:\n top,right,bottom,left = face_location\n face_image = img[top:bottom, left:right]\n pil_image = Image.fromarray(face_image)\n \n test_face_encode = face_encodings(img,[face_location])\n result={}\n name='Unknown'\n minval=1\n for person in known_face_encodings:\n r=face_distance(known_face_encodings[person], test_face_encode[0]).mean()\n if r <= 0.6:\n result.update({person:(1-r)*100})\n if r < minval:\n minval=r\n name=person\n \n Image._show(pil_image)\n \n print filename + \": The person is \" + name\n print \"All possiblities: \" + str(result)\n\n #writting in a file\n with open(parentDir + '/data/image_categorizations.csv', 'a') as csvfile:\n fieldnames=['person', 'fullfilename' , 'top', 'right', 'bottom', 'left']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n if name != 'Unknown': writer.writerow({'person':name, 'fullfilename': directory + '/' + filename, 'top':top, 'right':right, 'bottom':bottom, 'left':left})\n \n\n #raw_input(\"continue?\")", "def _sample_mini_dataset_mil(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n gifs, states, actions = class_obj.sample(num_shots)\n for shot_idx in range(num_shots):\n start_idx, end_idx = shot_idx*class_obj.T, (shot_idx + 1)*class_obj.T\n g, s, a = gifs[start_idx:end_idx], states[start_idx:end_idx], actions[start_idx:end_idx]\n yield (g, s, a)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\r\n # MP:\r\n # overlaps: (no_rois x no_gt_bbox) each row gives the overlap of the proposed region with the gt boxes. 
Overlap is measured as: (overlapping area)/(union area).\r\n # gt_assignment: determines which of the gt boxes has more overlap with the regions\r\n # max_overlaps: takes the maximum overlap of a region\r\n # labels: defines which which gt box corresponds best with the region and assigns its label to the region\r\n # fg_rois_per_image = 8\r\n # overlaps: (rois x gt_boxes)\r\n\r\n # MP: bbox_overlaps rewritten as c_bbox_overlaps\r\n #overlaps =c_bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n # \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n overlaps = bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n # MP: which column index has maximum value\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_boxes[gt_assignment, 4]\r\n\r\n\r\n # MP: Extract RoIs where overlap >= FG_THRESH\r\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\r\n\r\n # Guard against the case when an image has fewer than fg_rois_per_image (i.e. 8)\r\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\r\n\r\n # Sample foreground regions without replacement\r\n if fg_inds.size > 0:\r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\r\n\r\n # MP: Extract RoIs where overlap in [BG_THRESH_LO, BG_THRESH_HI), i.e. [0.0, 0.5)\r\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\r\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\r\n\r\n # Compute number of background RoIs to take from this image (guarding\r\n # against there being fewer than desired)\r\n # MP: Take the no of bg_inds such that fg_inds.shape + bg_inds.shape = 32\r\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\r\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\r\n if bg_inds.size > 0:\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\r\n\r\n\r\n # MP: concatenate the fg_inds and bg_inds, such that keep_inds.shape = 32\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n # MP: obtain the labels set the ones corresponding to bg_inds to zero\r\n labels = labels[keep_inds]\r\n labels[int(fg_rois_per_this_image):] = 0\r\n\r\n # MP: select the 32 rois (fg & bg) from the 2000+ rois with the keep_inds\r\n rois = all_rois[keep_inds]\r\n # MP: fg rois\r\n rois_pos = np.zeros((fg_inds.size, 5), dtype=np.float32) #because return rois_pos as top ---> allocate memory for it\r\n rois_pos[:, :] = all_rois[fg_inds]\r\n gt_assignment_pos = gt_assignment[fg_inds]\r\n\r\n # MP: compute diff to approximate bbox to ground truth\r\n bbox_target_data = _compute_targets(\r\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\r\n\r\n # MP: set the diff values in a matrix where each row corresponds to a foreground bbox\r\n # and the values are stored starting at the index of the label.\r\n # Therefore number of columns: 4*(no labels)\r\n # The bg bboxes are also included in rows, but have all values equal to zero.\r\n bbox_targets, bbox_inside_weights = \\\r\n _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n\r\n '''\r\n # MP: printing and saving files\r\n print \"overlaps with size {}: {}\".format(overlaps.shape, overlaps)\r\n print \"gt_assignment with size {}: {}\".format(gt_assignment.shape, gt_assignment)\r\n print \"max_overlaps with size{}: {}\".format(max_overlaps.shape, max_overlaps)\r\n print \"labels with size{}: {}\".format(labels.shape, labels)\r\n print 
\"bg_inds with size{}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"bg_rois_per_this_image: {}\".format(bg_rois_per_this_image)\r\n print \"bg_inds with shape {}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"fg_inds with size {}: {}\".format(fg_inds.shape, fg_inds)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois wiht shape {}: {}\".format(rois.shape, rois)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"gt_assignment_pos wiht shape {}: {}\".format(gt_assignment_pos.shape, gt_assignment_pos)\r\n print \"bbox_target_data wiht shape {}: {}\".format(bbox_target_data.shape, bbox_target_data)\r\n print \"diff: {}\".format(rois_pos[:,:] + bbox_target_data[0:fg_inds.size,:])\r\n print \"bbox_targets with size {}: {}\".format(bbox_targets.shape, bbox_targets)\r\n print \"bbox_inside_weights with size {}: {}\".format(bbox_inside_weights.shape, bbox_inside_weights)\r\n\r\n np.savetxt('bbox_targets.txt', bbox_targets, delimiter=',')\r\n np.savetxt('bbox_inside_weights.txt', bbox_inside_weights, delimiter=',')\r\n '''\r\n\r\n return labels, rois, bbox_targets, bbox_inside_weights, gt_boxes[gt_assignment[keep_inds], :], rois_pos, gt_assignment_pos", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # foreground RoIs\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\n # Sample foreground regions without replacement\n if fg_inds.size > 0:\n fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < BG_THRESH_HI) &\n (max_overlaps >= BG_THRESH_LO))[0]\n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\n # Sample background regions without replacement\n if bg_inds.size > 0:\n bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[fg_rois_per_this_image:] = 0\n rois = all_rois[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, bbox_targets, bbox_inside_weights", "def take_per_subject(self, n):\n subjectid = self.subjects()\n takelist = []\n for s in subjectid:\n d = os.path.join(self.datadir, s)\n for k in range(0,n):\n f = np.random.choice(imlist(d),1)[0]\n im = ImageDetection(filename=f).category(filebase(d))\n 
takelist.append(im)\n return takelist", "def _undersample(self, imgs_paths):\n labels = [bool(self._get_img_label(path)) for path in imgs_paths]\n without_tomato = imgs_paths[np.invert(labels)]\n with_tomato = imgs_paths[labels]\n\n indices = np.random.choice(\n with_tomato.shape[0],\n np.sum(labels),\n replace=False\n )\n\n parts = [imgs_paths[labels], without_tomato[indices]]\n imgs_paths = np.concatenate(parts)\n np.random.shuffle(imgs_paths)\n return [(path, None) for path in imgs_paths]", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def random_distribution(self, init_model_infos):\n model_folder = 'random_distribution_model'\n all_images = []\n for package in self.images_pool:\n image_dir = os.path.join(DATA_DIR, package)\n images_in_package = os.listdir(image_dir)\n for img in images_in_package:\n all_images.append(img)\n total_amount = 30\n # Select most hard images (30 as a step)\n # Start training with select images\n while total_amount < 300:\n al_model = TrainingProcess()\n al_model_data = random.sample(all_images,30)\n for item in al_model_data:\n all_images.remove(item)\n total_amount += 30\n if total_amount == 60:\n last_model_info = init_model_infos\n else:\n last_model_info = al_model_info\n last_model_path = os.path.join(last_model_info[0], last_model_info[1] + '.h5')\n last_model_weights = os.path.join(MODEL_DIR, last_model_path)\n al_model_info = [model_folder, '%s_images_model' % total_amount]\n al_model.train_model(al_model_data, al_model_info, self.dataset_val, cur_model_path=last_model_weights)\n al_model.mAP_of_model(al_model_info, self.dataset_val)\n del al_model\n print(\"Ending training\")", "def generate_trial(trialset, synset2img, trialtype, num_imgs):\n # randomly shuffle the sets.\n for s in trialset:\n random.shuffle(s)\n source = trialset[trialtype]\n # sample images\n # make sure we have the most specific guy\n src_imgs = [random.choice(synset2img[trialset[0][0]])]\n for i in range(num_imgs - 1):\n synset = random.choice(source)\n src_imgs.append(random.choice(synset2img[synset]))\n target_imgs = []\n # target imgs are sampled in a structured way\n # 12 images in domain\n for i in range(4):\n for j in range(3):\n synset = random.choice(trialset[i])\n target_imgs.append(random.choice(synset2img[synset]))\n # 12 images outside the domain\n for i in range(12):\n synset = random.choice(trialset[-1])\n target_imgs.append(random.choice(synset2img[synset]))\n # shuffling the images to minimize the ordering effect\n random.shuffle(src_imgs)\n random.shuffle(target_imgs)\n return src_imgs, target_imgs", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n 
utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def test_photo_classification_view_set_get_filtered_successful(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications?classification=tag')\n results = request.data['results']\n\n self.assertEquals(len(results), 2)", "def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, bb8_variance, im_info, granularity):\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes[:, 1:5] * np.array([im_info[1], im_info[0], im_info[1], im_info[0]]))\n gt_assignment = overlaps.argmax(axis=1)\n cid_labels = gt_boxes[gt_assignment, 0]\n max_overlaps = overlaps.max(axis=1)\n if DEBUG:\n print(\"max_overlaps: {}\".format(max_overlaps))\n\n # select foreground RoI with FG_THRESH overlap\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # keep_indexes = fg_indexes\n\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n if DEBUG:\n print(\"fg_indexes length: {}\".format(len(fg_indexes)))\n print(\"keep_indexes length: {}\".format(len(keep_indexes)))\n # sample rois and labels\n rois = rois[keep_indexes]\n # cid_labels = cid_labels[keep_indexes]\n # # set labels of bg rois to be 0\n # cid_labels[fg_rois_this_image:] = 0\n\n # load or compute bbox_target\n # targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4], box_stds=box_stds)\n FGA_cls_targets, FGA_reg_targets, FGA_reg_weights = \\\n bb8_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], 8:24],\n bb8_variance=bb8_variance, granularity=granularity, im_info=im_info)\n\n for i in range(fg_rois_this_image, rois_per_image):\n FGA_cls_targets[i] = -1\n FGA_reg_weights[i] = 0\n\n if DEBUG:\n print(\"FGA_cls_targets: {}\".format(FGA_cls_targets[-1]))\n print(\"FGA_reg_targets: {}\".format(FGA_reg_targets[-1]))\n 
print(\"FGA_reg_weights: {}\".format(FGA_reg_weights[-1]))\n\n return rois, FGA_cls_targets, FGA_reg_targets, FGA_reg_weights" ]
[ "0.6107763", "0.6104834", "0.60473615", "0.5822715", "0.5687118", "0.5592082", "0.55718535", "0.5563342", "0.5562081", "0.5492943", "0.5483507", "0.54614764", "0.54502845", "0.54063916", "0.5379569", "0.5375134", "0.53729117", "0.53599495", "0.5322199", "0.532211", "0.53098464", "0.5307775", "0.52907807", "0.52906066", "0.52852774", "0.5282286", "0.5261851", "0.52467567", "0.5244394", "0.5242769" ]
0.6976405
0
The metadata for the UTK dataset is encoded in the file names, so pass a list of UTK files.
def load_metadata(self, utkface_filenames):
    def utk_resolve_age_label(file):
        x = file.split('_')
        if len(x) != 4:
            return -1
        age = int(file.split('_')[0])
        if age in range(18):
            age_id = 0
        elif age in range(18,45):
            age_id = 1
        elif age in range(45,65):
            age_id = 2
        elif age in range(65,122):
            age_id = 3
        else:
            raise ValueError("Not sure how to handle this age: {}".format(age))
        return age_id

    def utk_resolve_gender_label(file):
        x = file.split('_')
        return int(x[1]) if len(x)==4 and len(x[1]) else -1

    def utk_resolve_race_label(file):
        x = file.split('_')
        return int(x[2]) if len(x)==4 else -1

    with open(utkface_filenames, 'r') as f:
        files = [x.strip() for x in f]
    utk = pd.DataFrame(files, columns=['filename'])
    utk['ImageID'] = utk['filename'].apply(lambda x: os.path.basename(x))
    utk['age'] = utk['ImageID'].apply(utk_resolve_age_label)
    utk['gender'] = utk['ImageID'].apply(utk_resolve_gender_label)
    utk['race'] = utk['ImageID'].apply(utk_resolve_race_label)
    return utk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, utkface_filenames = 'utkface_images.txt'):\n self.metadata = self.load_metadata(utkface_filenames)\n self.prefix = ''\n return", "def load_sotu_data():\n sotu_files = glob.glob(\"sotu-data/*.txt\")\n path_desc = re.compile(r\"sotu-data/([A-Za-z]+)_([0-9]{4})\\.txt\")\n for filepath in sotu_files:\n with open(filepath, \"r\") as f:\n raw_text = f.read()\n pres, year = path_desc.search(filepath).groups()\n yield {\"president\": pres, \"year\": year, \"speech\": raw_text}", "def KittiTestDataset(test_root_path):\n \n names = os.listdir(test_root_path)\n dataset = [[os.path.join(test_root_path, name)] for name in names]\n \n return dataset", "def createMetadata(request, datafile):\n samples = []\n datafile = datafile.split(',')\n for f in datafile:\n filename = f.replace('[', '').replace(']', '').replace('\"', '').replace(' ', '')\n cont = subprocess.Popen(\n [\"curl -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" -k -s \" + filename[1:]],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n with open(request.session.get('username') + \"/data.txt\", \"w\") as datafile:\n datafile.write(cont)\n with open(datafile.name, \"r\") as tfile:\n for line in tfile:\n if \"!Sample_geo_accession\" in line:\n line = line.split('\\t')\n for x in range(0, len(line)):\n samples.append(line[x].replace('\\n', ''))\n samples = filter(None, samples)\n tfile.seek(0)\n with open(request.session.get('username') + \"/meta.txt\", \"w\") as meta:\n for i in range(0, len(samples)):\n for line in tfile:\n if \"!Sample\" in line:\n line = line.split('\\t')\n line[i] = line[i].replace(\"!Sample_\", \"\").replace(\"\\n\", \"\").replace(\"'\", \"\").replace(\",\", \"\").replace(\"\\\"\", \"\")\n if line[i] == \"geo_accession\":\n line[i] = \"sample_id\"\n elif line[1] == \"\\\"female\\\"\" or line[1] == \"\\\"male\\\"\":\n line[0] = \"sex\"\n if \"title\" not in line[0]:\n meta.write(re.sub(r'[^\\x00-\\x7F]+', ' ', line[i]) + '\\t')\n meta.write('\\n')\n tfile.seek(0)\n meta.close()\n datafile.close()\n call([\"rm\", request.session.get('username') + \"/data.txt\"])\n return meta", "def train_projector(self, train_files, projector_file, metadata=None):\n assert isinstance(metadata, list)\n return super(DummyAlgorithmMetadata, self).train_projector(train_files, projector_file)", "def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]", "def save_and_upload_cohort_all_tumors(all_samples, name, namespace, workspace, blacklist=[]):\n tumor_samples = all_samples[all_samples.sample_type == \"Tumor\"]\n\n # Prepare column names\n df = tumor_samples[['entity:sample_id']].rename(columns={'entity:sample_id': 'sample_id'})\n df['membership:sample_set_id'] = name\n\n # Re-arrange columns\n cols = ['membership:sample_set_id', 'sample_id']\n df = df[cols]\n\n # Blacklist\n df = df[ ~df['sample_id'].isin(blacklist) ]\n df.to_csv('tumor_samples/fc_upload_%s.txt'%name, index=None, sep=\"\\t\")\n res = upload_entities_from_tsv(namespace, workspace, 'tumor_samples/fc_upload_%s.txt'%name)\n 
return res", "def get_tweet_file_names(self) -> None:\n self.list_of_files = os.listdir(self.input_file_path)\n no_of_files = len(self.list_of_files)\n for iterator in range(0, no_of_files):\n self.list_of_files[iterator] = self.input_file_path + \"\\\\\" + self.list_of_files[iterator]\n print(\"no of json files \",no_of_files)", "def main(args):\n metafiles = []\n verbose = args.verbose\n\n if (args.metalist is not None):\n for listfile in args.metalist:\n metafiles.extend(addmeta.list_from_file(listfile))\n\n if (args.metafiles is not None):\n metafiles.extend(args.metafiles)\n\n if verbose: print(\"metafiles: \",\" \".join([str(f) for f in metafiles]))\n\n addmeta.find_and_add_meta(args.files, metafiles)", "def main():\n\n tok = T5Tokenizer.from_pretrained('t5-small')\n data = Data(\n xmi_dir=args.xmi_dir,\n tokenizer=tok,\n max_input_length=args.max_input_length,\n max_output_length=args.max_output_length,\n partition=args.partition,\n n_files=args.n_files)\n\n for index in range(len(data)):\n input_ids = data[index]['input_ids']\n output_ids = data[index]['labels']\n print(tok.decode(input_ids, skip_special_tokens=True))\n print(tok.decode(output_ids, skip_special_tokens=True))\n print()", "def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n config = self\n\n if config.metadata_path:\n files.append((os.path.abspath(config.metadata_path), config.metadata_path))", "def read_texts_for_testing(tarfname, dname, vocab_size):\n import tarfile\n tar = tarfile.open(tarfname, \"r:gz\", errors = 'replace')\n train_mem = tar.getmember(dname + \".train.txt\")\n train_txt = unicode(tar.extractfile(train_mem).read(), errors='replace').lower()\n test_mem = tar.getmember(dname + \".test.txt\")\n test_txt = unicode(tar.extractfile(test_mem).read(), errors='replace').lower()\n \n dnames=[\"brown\",\"reuters\",\"gutenberg\"]\n out = [name for name in dnames if name != dname]\n \n out1_mem = tar.getmember(out[0]+\".test.txt\")\n out1_text = unicode(tar.extractfile(out1_mem).read(), errors='replace').lower()\n \n out2_mem = tar.getmember(out[1]+\".test.txt\")\n out2_text = unicode(tar.extractfile(out2_mem).read(), errors='replace').lower()\n \n from sklearn.feature_extraction.text import CountVectorizer\n count_vect = CountVectorizer()\n count_vect.fit(train_txt.split(\"\\n\"))\n tokenizer = count_vect.build_tokenizer()\n class Data: pass\n data = Data()\n data.train = []\n for s in train_txt.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks) > 0:\n data.train.append(toks)\n data.test = []\n for s in test_txt.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks) > 0:\n data.test.append(toks)\n out1 = []\n for s in out1_text.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks)>0:\n out1.append(toks)\n out2 = []\n for s in out2_text.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks)>0:\n out2.append(toks)\n \n word_freq = nltk.FreqDist(itertools.chain(*data.train))\n print(\"Found %d unique words tokens.\" % len(word_freq.items()))\n vocab = word_freq.most_common(vocab_size)\n print(\"in {} vocab size=\".format(dname),len(vocab))\n index_to_word = [x[0] for x in vocab]\n word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])\n \n for i, sent in enumerate(data.train):\n data.train[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n for i, sent in enumerate(data.test):\n data.test[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n for i,sent in enumerate(out1):\n out1[i] = \"<bos> 
\"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n for i,sent in enumerate(out2):\n out2[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n \n tests = {}\n tests[\"self\"] = data.train\n tests[\"in\"] = data.test\n tests[\"out1\"] = out1\n tests[\"out2\"] = out2\n with open(\"test_for_\"+dname+\".pkl\",'wb') as f:\n pkl.dump(tests,f)", "def read_texts(tarfname, dname):\n import tarfile\n tar = tarfile.open(tarfname, \"r:gz\", errors = 'replace')\n train_mem = tar.getmember(dname + \".train.txt\")\n train_txt = unicode(tar.extractfile(train_mem).read(), errors='replace').lower()\n test_mem = tar.getmember(dname + \".test.txt\")\n test_txt = unicode(tar.extractfile(test_mem).read(), errors='replace').lower()\n dev_mem = tar.getmember(dname + \".dev.txt\")\n dev_txt = unicode(tar.extractfile(dev_mem).read(), errors='replace').lower()\n\n from sklearn.feature_extraction.text import CountVectorizer\n count_vect = CountVectorizer()\n count_vect.fit(train_txt.split(\"\\n\"))\n tokenizer = count_vect.build_tokenizer()\n class Data: pass\n data = Data()\n data.train = []\n for s in train_txt.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks) > 0:\n data.train.append(toks)\n data.test = []\n for s in test_txt.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks) > 0:\n data.test.append(toks)\n data.dev = []\n for s in dev_txt.split(\"\\n\"):\n toks = tokenizer(s)\n if len(toks) > 0:\n data.dev.append(toks)\n \n word_freq = nltk.FreqDist(itertools.chain(*data.train))\n print(\"Found %d unique words tokens.\" % len(word_freq.items()))\n vocab = word_freq.most_common(int(len(word_freq.items())*0.99))\n index_to_word = [x[0] for x in vocab]\n word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])\n \n for i, sent in enumerate(data.train):\n data.train[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n for i, sent in enumerate(data.dev):\n data.dev[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n for i, sent in enumerate(data.test):\n data.test[i] = \"<bos> \"+\" \".join([w if w in word_to_index else \"<unk>\" for w in sent])+\" <eos>\"\n \n print(dname,\" read.\", \"train:\", len(data.train), \"dev:\", len(data.dev), \"test:\", len(data.test))\n return data", "async def get_files_metadata_dataset(\n location_id: LocationID,\n dataset_id: str,\n user_id: UserID,\n expand_dirs: bool = Query(\n True,\n description=(\n \"Automatic directory expansion. 
This will be replaced by pagination the future\"\n ),\n ),\n):", "def _load_original_dataset(data_directory, setname):\n\n Sign = collections.namedtuple(\"Sign\", [\"visibility\", \"type\", \"name\"])\n data_directory = pathlib.Path(data_directory)\n filename = data_directory / setname / \"annotations.txt\"\n with tf.io.gfile.GFile(filename) as f:\n files, annotations = zip(*(l.strip().split(\":\", 1) for l in f))\n\n all_signs = []\n for annotation in annotations:\n signs = []\n for sign in annotation.split(\";\"):\n if sign == [\"\"] or not sign: continue\n parts = [s.strip() for s in sign.split(\",\")]\n if parts[0] == \"MISC_SIGNS\": continue\n signs.append(Sign(parts[0], parts[5], parts[6]))\n all_signs.append(signs)\n\n filepaths = (data_directory / setname / f for f in files)\n return zip(filepaths, all_signs)", "def create_test_set(self):\n test_files = os.listdir(self.image_folder_path)\n test_files = sorted_alphanumeric(test_files)\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"test.txt\", self.txt_path, test_files)", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 
1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def read_fn(file_references, mode, params=None):\r\n print('Reading the dataset from Datalakestore (2mm NIfTI images)....')\r\n\r\n def _augment(img):\r\n \"\"\"An image augmentation function\"\"\"\r\n return flip(img, axis=2)\r\n\r\n image_array = []\r\n label_array = []\r\n for f in file_references:\r\n subject_id = f[0]\r\n\r\n # Read the image nii with sitk\r\n ##t1_fn = os.path.join(data_path, '{}/T1_2mm.nii.gz'.format(subject_id))\r\n ##t1 = sitk.GetArrayFromImage(sitk.ReadImage(str(t1_fn)))\r\n t1_fn = os.path.join(data_path, '{}/T1_2mm.nii.gz'.format(subject_id))\r\n print(t1_fn)\r\n #with adlsFileSystemClient.open(t1_fn, 'rb') as f:\r\n # img = sitk.ReadImage(str(f))\r\n # sitk::ERROR: The file \"<ADL file: /clusters/DLTK_IXI_Dataset/2mm/IXI012/T1_2mm.nii.gz>\" does not exist.\r\n # sitk seems only read from local path....how to read from remote path????????\r\n # for short term download to local path\r\n # rpath is datalakestore, lpath is local file path both have the same root structure '/clusters/DLTK_IXI_Dataset/'\r\n multithread.ADLDownloader(adlsFileSystemClient, rpath=t1_fn, lpath=t1_fn, nthreads=5, chunksize=2**24, overwrite=True)\r\n img = sitk.ReadImage(str(t1_fn))\r\n # you need http://imagej.net/Fiji#Downloads app to show the img. More discussion and instruction: https://stackoverflow.com/questions/45682319/simpleitk-show-generates-error-in-imagej-on-linux\r\n ##sitk.Show(img)\r\n t1 = sitk.GetArrayFromImage(img)\r\n\r\n # Normalise volume image\r\n t1 = whitening(t1)\r\n images = np.expand_dims(t1, axis=-1).astype(np.float32)\r\n\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n yield {'features': {'x': images}, 'img_id': subject_id}\r\n print('read_fn Predict')\r\n\r\n # Parse the sex classes from the file_references [1,2] and shift them\r\n # to [0,1]\r\n sex = np.int(f[1]) - 1\r\n y = np.expand_dims(sex, axis=-1).astype(np.int32)\r\n\r\n # Augment if used in training mode\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n images = _augment(images)\r\n print('read_fn Train')\r\n # Check if the reader is supposed to return training examples or full images\r\n if params['extract_examples']:\r\n #print('read_fn params extract_examples')\r\n images = extract_random_example_array(\r\n image_list=images,\r\n example_size=params['example_size'],\r\n n_examples=params['n_examples'])\r\n for e in range(params['n_examples']):\r\n #print ('e: ', e)\r\n## yield {'features': {'x': images[e].astype(np.float32)},\r\n## 'labels': {'y': y.astype(np.float32)},\r\n## 'img_id': subject_id}\r\n image_array.append(images[e].astype(np.float32))\r\n label_array.append(y.astype(np.int32))\r\n else:\r\n print('read_fn params yield last')\r\n## yield {'features': {'x': images},\r\n## 'labels': {'y': y.astype(np.float32)},\r\n## 'img_id': subject_id}\r\n image_array.append(images)\r\n label_array.append(y.astype(np.int32))\r\n\r\n print(\"read_fn yield output_array with image shape = \", images.shape, \"label shape = \", y.shape)\r\n yield {'x': np.array(image_array), 'y': np.array(label_array)}", "def 
prepare_dataset(fpath):\n raise NotImplementedError", "def register_data_files(self, *files, task=None, run=None):\n\n files = [Path(f) for f in files]\n for file in files:\n if file.suffix not in DATA_EXTENSIONS:\n raise ValueError(f'Wrong file format of data {file.suffix}. '\n f'Valid formats are {DATA_EXTENSIONS}')\n\n key = ''\n if task is not None:\n key += f'task_{task}'\n if run is not None:\n key += f'run-{run}'\n\n if key not in self.data:\n self.data[key] = files\n else:\n self.data['key'].extend(files)", "async def get_datasets_metadata(location_id: LocationID, user_id: UserID):", "def _setup_data_filenames(self):\n\n # read in filenames of training data(poses, images, labels)\n logging.info('Reading filenames')\n all_filenames = os.listdir(self.data_dir)\n if self.image_mode== ImageMode.BINARY:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1]\n elif self.image_mode== ImageMode.BINARY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.COLOR_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.GRAY_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1]\n elif self.image_mode== ImageMode.DEPTH_TF_TABLE:\n self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1]\n else:\n raise ValueError('Image mode %s not supported.' %(self.image_mode))\n\n self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1]\n self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1]\n\n self.im_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.pose_filenames.sort(key = lambda x: int(x[-9:-4]))\n self.label_filenames.sort(key = lambda x: int(x[-9:-4]))\n\n # check that all file categories were found\n if len(self.im_filenames) == 0 or len(self.label_filenames) == 0 or len(self.label_filenames) == 0:\n raise ValueError('1 or more required training files could not be found')", "def _build(self, files, strict=True, usrdata=None):\n # Allow for single files\n _files = files if hasattr(files, '__len__') else [files]\n\n # Build lists to fill\n data = {k:[] for k in self.spectrograph.meta.keys()}\n data['directory'] = ['None']*len(_files)\n data['filename'] = ['None']*len(_files)\n\n # Build the table\n for idx, ifile in enumerate(_files):\n _ifile = Path(ifile).resolve()\n # User data (for frame type)\n if usrdata is None:\n usr_row = None\n else:\n # TODO: This check should be done elsewhere\n # Check\n if _ifile.name != usrdata['filename'][idx]:\n msgs.error('File name list does not match user-provided metadata table. See '\n 'usrdata argument of instantiation of PypeItMetaData.')\n usr_row = usrdata[idx]\n\n # Add the directory and file name to the table\n data['directory'][idx] = str(_ifile.parent)\n data['filename'][idx] = _ifile.name\n if not data['directory'][idx]:\n data['directory'][idx] = '.'\n\n # Read the fits headers. 
NOTE: If the file cannot be opened,\n # headarr will be None, and the subsequent loop over the meta keys\n # will fill the data dictionary with None values.\n msgs.info(f'Adding metadata for {data[\"filename\"][idx]}')\n headarr = self.spectrograph.get_headarr(_ifile, strict=strict)\n\n # Grab Meta\n for meta_key in self.spectrograph.meta.keys():\n value = self.spectrograph.get_meta_value(headarr, meta_key, \n required=strict,\n usr_row=usr_row, \n ignore_bad_header = (\n self.par['rdx']['ignore_bad_headers'] or strict))\n if isinstance(value, str) and '#' in value:\n value = value.replace('#', '')\n msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(\n meta_key, value))\n data[meta_key].append(value)\n\n # JFH Changed the below to not crash if some files have None in\n # their MJD. This is the desired behavior since if there are\n # empty or corrupt files we still want this to run.\n\n # Validate, print out a warning if there is problem\n try:\n time.Time(data['mjd'], format='mjd')\n except ValueError:\n mjd = np.asarray(data['mjd'])\n filenames = np.asarray(data['filename'])\n bad_files = filenames[mjd == None]\n # Print status message\n msg = f'Time invalid for {len(bad_files)} files.\\nContinuing, but the following ' \\\n 'frames either could not be opened, are empty, or have corrupt headers:\\n'\n for file in bad_files:\n msg += f' {file}\\n'\n msgs.warn(msg)\n\n # Return\n return data", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def get_utt():\n\treturn mfcc_h5.get_datasets()", "def htk(self, data_type):\n if self.htk_save_path is None:\n raise ValueError('Set path to htk files.')\n\n return [p for p in glob(join(self.htk_save_path, data_type, '*/*.htk'))]\n # NOTE: ex.) 
timit/htk/data_type/speaker/speaker_utt-index.htk", "def get_train_files(self):\n raise NotImplementedError", "def get_parsed_data():\n\n echonest_data_files = [f for f in os.listdir('.') if re.match(\"^echonest_[\\w]+.txt$\", f)]\n\n # Setting up header with user id and attributes\n header = ['user_id']\n header.extend(ATTRIBUTES)\n\n # printing header to standard out\n print \",\".join(header) \n\n # Processing each file to obtain parsed data\n for data_file in echonest_data_files:\n user_id = data_file[9:-4] # strip file prefix/suffix to get username/id\n parse_echonest_data_file(data_file, user_id)", "def metadata_update_targets(targets):\n filenames = []\n for target in targets:\n if target == 'stable':\n filename = _generate_metadata_kind('firmware.xml.gz', targets=['stable'])\n filenames.append(filename)\n elif target == 'testing':\n filename = _generate_metadata_kind('firmware-testing.xml.gz', targets=['stable', 'testing'])\n filenames.append(filename)\n\n # return all the files we have to sign\n return filenames", "def get_files_io():\n if GC.conf['general']['training']:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'train.txt'),\n 'new': os.path.join(COOKED_DATA, 'train_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'train_norm.txt'),\n 'manu': os.path.join(RAW_DATA, 'others', 'temp_updt_manu.txt'),\n 'labels': os.path.join(TRAIN_DATA, 'train_norm.txt_labels.pkl'),\n 'segll': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_deeplog.pkl'),\n 'struct': os.path.join(TRAIN_DATA, 'train_norm.txt_structured.csv'),\n 'output': TRAIN_DATA\n }\n else:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'test.txt'),\n 'new': os.path.join(COOKED_DATA, 'test_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'test_norm.txt'),\n 'labels': os.path.join(TEST_DATA, 'test_norm.txt_labels.pkl'),\n 'segll': os.path.join(TEST_DATA, 'test_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TEST_DATA, 'test_norm.txt_seginf_deeplog.pkl'),\n 'map_norm_raw': os.path.join(TEST_DATA, 'map_norm_raw.pkl'),\n 'map_norm_rcv': os.path.join(TEST_DATA, 'map_norm_rcv.pkl'),\n 'norm_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt'),\n 'struct': os.path.join(TEST_DATA, 'test_norm.txt_structured.csv'),\n 'struct_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt_structured.csv'),\n 'top': os.path.join(TEST_DATA, 'analysis_summary_top.txt'),\n 'sum': os.path.join(TEST_DATA, 'analysis_summary.csv'),\n 'rst_llab': os.path.join(TEST_DATA, 'results_loglab.csv'),\n 'rst_dlog': os.path.join(TEST_DATA, 'results_deeplog.txt'),\n 'rst_llzr': os.path.join(TEST_DATA, 'results_loglizer.csv'),\n 'dbg': os.path.join(TEST_DATA, 'debug.csv'),\n 'output': TEST_DATA\n }\n return files_zip" ]
[ "0.6503858", "0.5745298", "0.56453", "0.5499407", "0.531591", "0.5222382", "0.5202782", "0.5201199", "0.51858264", "0.51849985", "0.51831937", "0.5131584", "0.51163554", "0.51074165", "0.50857013", "0.5076598", "0.50594175", "0.5055774", "0.5037375", "0.5024851", "0.50070727", "0.49821112", "0.4980553", "0.49793178", "0.49784362", "0.49777812", "0.49677745", "0.49639118", "0.49594367", "0.49588576" ]
0.638878
1
Return the total receptive field of this model as # of frames.
def receptive_field(self): frames = 0 for f in self.pad: frames += f return 1 + 2 * frames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receptive_field(self):\n return self.LocalLayer_Torso.receptive_field()", "def total_rewards(self) -> float:\n return self.__total_rewards", "def fields(self):\n return (self._total + self._mean + self._variance\n + self._skew + self._kurtosis)", "def patrimony_total(self):\n pass", "def get_prop(self):\n\t\tnewframe = copy.deepcopy(self)\n\t\tfor f in newframe.header[1:]:\n\t\t\tsum = newframe.sum_field(f)\n\t\t\tfor d in newframe:\n\t\t\t\ttry:\n\t\t\t\t\td[f]= d[f]/float(sum)*100\n\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\td[f] = 0\n\t\t\n\t\treturn newframe", "def getScore(self):\n return sum(self.field)", "def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def frame_rate(self):\n return self._frame_rate", "def total_reward(self):\n return np.sum(self.rewards)", "def life_insurance_to_recive_total(self):\n pass", "def total(self):\n\t\treturn self._total", "def total_differences(self):\n return self._total_diffs", "def getTotalReward(self):\n return self.cumreward", "def getTotalReward(self):\n return self.cumreward", "def tot(self):\n return self.det + self.out + self.faint + self.late", "def get_receptive_field_radius(self):\n raise NotImplementedError()", "def get_frame_duration(self):\n return self._frame_duration", "def FrameCount(self):\r\n\t\treturn self._get_attribute('frameCount')", "def fe_ratio(self):\n return self._fe_ratio", "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To th area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "def total_realised_pnl(self):\n return self.pos_handler.total_realised_pnl()", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def getInteractionRate(self):\n m = mctal.MCTAL(self.name+'.m')\n t = m.tallies[4]\n # Returing the total\n return t.data[-1],t.errors[-1]", "def get_reg(self):\n loss = 0\n for name, m in self.net.named_children():\n if name.startswith('wave'):\n loss += m[0].GainLayer.get_reg()\n elif name.startswith('conv'):\n loss += 0.5 * self.wd * torch.sum(m[0].weight**2)\n loss += 0.5 * self.wd * torch.sum(self.fc1.weight**2)\n return loss", "def relative_rate(self):\n return _spacegrant_swig.udp_debug_sptr_relative_rate(self)" ]
[ "0.6807186", "0.6247257", "0.6151634", "0.6062826", "0.60195196", "0.60095304", "0.6005246", "0.5999161", "0.5976631", "0.5976631", "0.5976631", "0.5976631", "0.5954339", "0.5918959", "0.590753", "0.589701", "0.5859162", "0.5844734", "0.5844734", "0.5828635", "0.5827173", "0.57821864", "0.5778133", "0.57746553", "0.5737219", "0.5720969", "0.5716971", "0.57068545", "0.56906897", "0.568676" ]
0.70493305
0
Function to get coordinates of given films
def get_location_coordinates(films_set, film_number=0): if not film_number: film_number = len(films_set) films_list = sorted(list(films_set)) print(f'List has {len(films_list)} films with specified year. ' f'\nAmount of films to analyze: {film_number} ' f'\n------------------------------') locations_loss = 0 lost_locations = [] output_list = [] coordinates_set = set() geoloc = Nominatim(user_agent="map") print('Loading...') for i in range(film_number): if '.' in films_list[i][-1]: geo_value = geoloc.geocode(films_list[i][-1] [films_list[i][-1].find('.'):], timeout=30) else: geo_value = geoloc.geocode(films_list[i][-1], timeout=30) if geo_value is None or \ (geo_value.latitude, geo_value.longitude) in coordinates_set: locations_loss += 1 lost_locations.append(films_list[i]) continue time.sleep(1.1) coordinates = (geo_value.latitude, geo_value.longitude) coordinates_set.add(coordinates) output_list.append([films_list[i][0], coordinates]) print(f"Lost {locations_loss} locations overall, due to geopy", lost_locations) return output_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_film_coordinates(film_dict):\n coordinate_dict = dict()\n for location in film_dict.keys():\n try:\n locator = geopy.Nominatim(user_agent=\"filmGeocoder\", timeout=10)\n coordinates = locator.geocode(location)\n\n coordinate_dict[coordinates.latitude, coordinates.longitude] = film_dict[location]\n except (TypeError, AttributeError, IndexError):\n continue\n\n return coordinate_dict", "def get_distance(film_coordinates, latitude, longitude):\n film_distance = []\n for film in film_coordinates.keys():\n user_coordinates = (latitude, longitude)\n film_coord = (film[0], film[1])\n\n distance = great_circle(user_coordinates, film_coord).kilometers\n film_distance.append((distance, film[0], film[1], film_coordinates[film]))\n\n film_distance.sort(key=lambda x: x[0])\n return film_distance[:10]", "def get_nearest_films_filming_from_file(path, user_coordinates):\n data = pandas.read_csv(path, sep=';\\t', engine='python')\n locations, films = data['location'], data['films']\n lat, long = data['latitude'], data['longitude']\n\n distance_list = []\n for location, x, y, film in zip(locations, lat, long, films):\n distance = geodesic((x, y), user_coordinates).km\n distance_list.append((location, film, (x, y), distance))\n distance_list_sorted = sorted(distance_list, key=lambda t: t[-1])\n nearest_films_filming = [(elem[1], elem[2])\n for elem in distance_list_sorted]\n return nearest_films_filming", "def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))", "def find_position(self, focal_length, real_height):\n height_px, offset_px = self.process_image(self.image)\n if height_px == 0:\n return 0, (0,0)\n dist = (focal_length * real_height) / height_px\n x_off = (offset_px[0] * dist)/focal_length\n y_off = (offset_px[1] * dist)/focal_length\n return (dist, (x_off, y_off))", "def get_nearest_films(films_list, number, input_location):\n output_list = []\n for film_data in films_list:\n film_dist = int(distance.distance(film_data[1], input_location).km)\n film_data.append(film_dist)\n output_list.append(film_data)\n output_list.sort(key=lambda x: x[-1])\n if len(output_list) >= int(number):\n output_list.pop()\n dist_list = [film[-1] for film in output_list]\n print(f'Closest film distance: {dist_list[0]} km.')\n print(f'Furthest film distance: {dist_list[-1]} km.')\n return output_list", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def get_layers(filen, flist):\n lay_lim =()\n if (filen in flist[0]) or (filen in flist[1]) or \\\n (filen in flist[2]) or (filen in flist[3]):\n lay_lim = (24,45)\n elif (filen in flist[4]):\n lay_lim = (29,50)\n return lay_lim", "def positions(self):\n method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n return [getattr(line, method)()[0] for line in self.artists]", "def get_person_coordinates(self, depth_image, detections):\n coord_list = []\n count = 0\n for det in detections:\n count = count + 1\n if det.ClassID == 1:\n person_center = det.Center\n x, y = person_center\n depth_arr = []\n try:\n for x in range(int(x) - 2, int(x) + 3):\n for y in range(int(y) - 2, int(y) + 3):\n depth_arr.append(depth_image[int(x), int(y)] / 1000.0)\n depth = np.mean(depth_arr)\n person_coord = self._get_coord(depth, x, y)\n self.make_marker(person_coord, count)\n 
coord_list.append(person_coord)\n except IndexError:\n self.marker_pub.publish(self.marker_array)\n self.marker_pub.publish(self.marker_array)\n return coord_list", "def determine_animal_pos(self, plot, latitude, longitude):\r\n x = convert_fraction_lat(\r\n\r\n str(return_values(plot, latitude)\r\n )\r\n )[0] * self.space.x_max\r\n\r\n y = convert_fraction_long(\r\n str(return_values(plot, longitude)\r\n )\r\n )[0] * self.space.y_max\r\n pos = (x, y)\r\n return pos", "def face_coords(img, model):\n (h, w) = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n model.setInput(blob)\n detections = model.forward()\n\n box = detections[0, 0, 0, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n return (startX, startY, endX, endY)", "def coordinates(self):", "def process_coords():\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed", "def xy(self, photons):\n flatbeam = self.beamImage.flatten()\n beamsorted = np.argsort(flatbeam)\n ind = np.searchsorted(flatbeam[beamsorted], photons[\"resID\"])\n return np.unravel_index(beamsorted[ind], self.beamImage.shape)", "def get_at_position(self, x=507, y=507, filter='F140W'):\n epsf = self.epsf[filter]\n \n rx = 1+(np.clip(x,1,1013)-0)/507.\n ry = 1+(np.clip(y,1,1013)-0)/507.\n \n # zero index\n rx -= 1\n ry -= 1 \n\n nx = np.clip(int(rx), 0, 2)\n ny = np.clip(int(ry), 0, 2)\n\n # print x, y, rx, ry, nx, ny\n\n fx = rx-nx\n fy = ry-ny\n\n psf_xy = (1-fx)*(1-fy)*epsf[:, :, nx+ny*3]\n psf_xy += fx*(1-fy)*epsf[:, :, (nx+1)+ny*3]\n psf_xy += (1-fx)*fy*epsf[:, :, nx+(ny+1)*3]\n psf_xy += fx*fy*epsf[:, :, (nx+1)+(ny+1)*3]\n self.eval_filter = filter\n \n return psf_xy", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def getSearchSpaceCoords(self):", "def feature_coords(features):\n coords_list = []\n for feature in features:\n coord_start = feature.location.nofuzzy_start\n coord_end = feature.location.nofuzzy_end\n coord_pair = (coord_start, coord_end)\n coords_list.append(coord_pair)\n ## consider adding some info to the log\n return coords_list", "def get_positions(specs):\r\n xy = []\r\n for i, spec in enumerate(specs):\r\n slit = spec.split(\"n3311\", 1)[1].replace(\".fits\", \"\")\r\n # slit = spec.split(\".\")[0].split(\"_\", 1)[1][5:]\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)", "def condense_coords(matches):\n x = []\n y = []\n for m in matches:\n x += m['matches']['p'][0]\n x += m['matches']['q'][0]\n y += m['matches']['p'][1]\n y += m['matches']['q'][1]\n coords = np.transpose(np.vstack((np.array(x), np.array(y))))\n return coords", "def _get_fid_coords(dig_points, raise_error=True):\n fid_coords = Bunch(nasion=None, lpa=None, rpa=None)\n fid_coord_frames = dict()\n\n for d in dig_points:\n if d[\"kind\"] == FIFF.FIFFV_POINT_CARDINAL:\n key = _cardinal_ident_mapping[d[\"ident\"]]\n fid_coords[key] = d[\"r\"]\n fid_coord_frames[key] = d[\"coord_frame\"]\n\n if len(fid_coord_frames) > 0 
and raise_error:\n if set(fid_coord_frames.keys()) != set([\"nasion\", \"lpa\", \"rpa\"]):\n raise ValueError(\n f\"Some fiducial points are missing, got {fid_coords.keys()}\"\n )\n\n if len(set(fid_coord_frames.values())) > 1:\n raise ValueError(\n \"All fiducial points must be in the same coordinate system, \"\n f\"got {len(fid_coord_frames)})\"\n )\n\n coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None\n\n return fid_coords, coord_frame", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def get_imdb_list():\n list_file = 'imdb.txt'\n name_column = 26\n f = open(list_file, 'r')\n film_list = []\n pos = 0\n\n for line in f:\n pos += 1\n words = line.split()\n name = line[name_column:-1]\n # could be problematic is there are brackets in the film name\n year = name[name.find('(') + 1:name.find(')')]\n name = name.replace('(' + year + ')', '')\n film = {\n 'pos': pos,\n 'score': Decimal(words[2]),\n 'name': name.strip(),\n 'year': year\n }\n film_list.append(film)\n f.close()\n return film_list", "def get_face_coords(self, frame):\n\n\t\ttry:\n\t\t\tresults = self.face_detector.detect_faces(frame) # Detects faces in the image\n\n\t\t\tx1, y1, width, height = results[0]['box'] # Bounding box of first face\n\t\t\tx1, y1 = abs(x1), abs(y1) # bug fix...\n\t\t\tx2, y2 = x1 + width, y1 + height\n\t\texcept:\n\t\t\tx1, x2, y1, y2 = -1, -1, -1, -1\n\t\treturn y1, y2, x1, x2", "def get_mouth(dictionary):\r\n mouth = []\r\n for landmark, coordinate in dictionary.items():\r\n if landmark.startswith(\"mouth_\"):\r\n mouth.append((coordinate['x'], coordinate['y']))\r\n return mouth", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def find_coordinates(self):\n\n raise NotImplementedError", "def find_coordinates(self):\n\n raise NotImplementedError", "def parse_coords(lines):\r\n pcoa_results = OrdinationResults.from_file(lines)\r\n return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals,\r\n pcoa_results.proportion_explained)" ]
[ "0.6283534", "0.59644246", "0.58434176", "0.5780984", "0.55411386", "0.55378103", "0.55216914", "0.5519904", "0.55140954", "0.5485288", "0.5386026", "0.53777754", "0.53692013", "0.5347572", "0.53261596", "0.5300548", "0.5293786", "0.5275657", "0.52662235", "0.5232054", "0.5220899", "0.5192166", "0.51806873", "0.516845", "0.516158", "0.51487076", "0.5140363", "0.51324064", "0.51324064", "0.51253086" ]
0.663994
0
Function finds the nearest films near user specified location
def get_nearest_films(films_list, number, input_location): output_list = [] for film_data in films_list: film_dist = int(distance.distance(film_data[1], input_location).km) film_data.append(film_dist) output_list.append(film_data) output_list.sort(key=lambda x: x[-1]) if len(output_list) >= int(number): output_list.pop() dist_list = [film[-1] for film in output_list] print(f'Closest film distance: {dist_list[0]} km.') print(f'Furthest film distance: {dist_list[-1]} km.') return output_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nearest_films_filming_from_file(path, user_coordinates):\n data = pandas.read_csv(path, sep=';\\t', engine='python')\n locations, films = data['location'], data['films']\n lat, long = data['latitude'], data['longitude']\n\n distance_list = []\n for location, x, y, film in zip(locations, lat, long, films):\n distance = geodesic((x, y), user_coordinates).km\n distance_list.append((location, film, (x, y), distance))\n distance_list_sorted = sorted(distance_list, key=lambda t: t[-1])\n nearest_films_filming = [(elem[1], elem[2])\n for elem in distance_list_sorted]\n return nearest_films_filming", "def users_nearby(self, meters):\n location = Location.objects.get(id=self.most_recent_location_id)\n lng = location.position['coordinates'][0]\n lat = location.position['coordinates'][1]\n\n nearby_locations = Location.objects(position__near=[lng, lat], position__max_distance=meters)\n\n nearby_user_ids = []\n\n for loc in nearby_locations:\n nearby_user_ids.append(loc.uid)\n\n return SallasanaUser.objects.filter(id__in=nearby_user_ids)", "def get_distance(film_coordinates, latitude, longitude):\n film_distance = []\n for film in film_coordinates.keys():\n user_coordinates = (latitude, longitude)\n film_coord = (film[0], film[1])\n\n distance = great_circle(user_coordinates, film_coord).kilometers\n film_distance.append((distance, film[0], film[1], film_coordinates[film]))\n\n film_distance.sort(key=lambda x: x[0])\n return film_distance[:10]", "def get_closest(list_of_nearby, favorite_place):\n\tref_rating = float(favorite_place[\"rating\"]) # this is a float\n\tref_price_len = len(favorite_place[\"price\"]) # this is the length of the dollar sign - an int\n\tref_categ = favorite_place[\"categories\"] # this is a string!\n\n\tfor item in list_of_nearby:\n\t\tscore = 0\n\t\tlist_of_cat_words = item[categories].split()\n\t\tfor word in list_of_cat_words:\n\t\t\tif word in ref_categ:\n\t\t\t\tscore += 1\n\t\tscore = score * 5\n\t\tscore = score - 2 * abs(len(item[\"price\"]) - ref_price_len)\n\t\tscore = score - 10 * abs(float(item[\"rating\"]) - ref_rating)\n\t\titem[\"score\"] = score\n\n\tfor item in list_of_nearby:\n\t\treturn_list = []\n\t\treturn_list.append({\"id\": item[\"id\"], \"score\": item[\"score\"]})\n\n\treturn_list = sorted(return_list, key = lambda i: i[\"score\"])\n\treturn return_list", "def nearest(reference, locations):\n return [x[1] for x in distances(reference, locations)]", "def nearby(self, words, num=20):\n ids = np.array([vocabulary.getVocabID(x) for x in words])\n vals, idx = self.sess.run(\n [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})\n for i in range(len(words)):\n print(\"\\n%s\\n=====================================\" % (words[i]))\n for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):\n print(neighbor)\n print(\"%-20s %6.4f\" % (vocabulary.VocabID_to_vocab(neighbor), distance))", "def get_location_coordinates(films_set, film_number=0):\n if not film_number:\n film_number = len(films_set)\n\n films_list = sorted(list(films_set))\n print(f'List has {len(films_list)} films with specified year. '\n f'\\nAmount of films to analyze: {film_number} '\n f'\\n------------------------------')\n\n locations_loss = 0\n lost_locations = []\n output_list = []\n coordinates_set = set()\n geoloc = Nominatim(user_agent=\"map\")\n print('Loading...')\n for i in range(film_number):\n if '.' 
in films_list[i][-1]:\n geo_value = geoloc.geocode(films_list[i][-1]\n [films_list[i][-1].find('.'):], timeout=30)\n else:\n geo_value = geoloc.geocode(films_list[i][-1], timeout=30)\n if geo_value is None or \\\n (geo_value.latitude, geo_value.longitude) in coordinates_set:\n locations_loss += 1\n lost_locations.append(films_list[i])\n continue\n time.sleep(1.1)\n coordinates = (geo_value.latitude, geo_value.longitude)\n coordinates_set.add(coordinates)\n output_list.append([films_list[i][0], coordinates])\n print(f\"Lost {locations_loss} locations overall, due to geopy\", lost_locations)\n return output_list", "def near():\n\targs = request.args\n\n\tif 'limit' in args: limit = int(args.get('limit'))\n\telse: limit = 1\n\n\tif 'lat' in args and 'lng' in args:\n\t\tlat = float(args.get('lat'))\n\t\tlng = float(args.get('lng'))\n\n\telse:\n\t\treturn jsonify(success=False, reason='wrong_arguments')\n\n\tdocs = findWithInCircle([lat,lng],6)\n\n\treturn json_util.dumps({\n\t\t'success': True, 'docs': docs\n\t})", "def search2(term, location, distance):\n\n print location\n if float(distance) <= 5.0:\n radius = 0.75\n elif float(distance) > 5.0 and float(distance) <= 10.0:\n radius = 1.5\n elif float(distance) > 10.0 and float(distance) <= 25.0:\n radius = 4\n elif float(distance) > 25.0 and float(distance) <= 100.0:\n radius = 8\n elif float(distance) > 100.0:\n radius = 15\n\n metradius = int(float(1609) * float(radius))\n print metradius\n url_params = {\n 'category_filter': term.replace(' ', '+'),\n 'radius_filter': metradius,\n 'll': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "def closest_stations(latlong, df):\n names = df['name'].values\n station_dists = {}\n for (lat, lon, name) in list(df[['Lat', 'Lon', 'name']].value_counts().index):\n if not(np.isnan(lat) or np.isnan(lon)):\n station_dists[name] = haversine(latlong, (lat, lon)) \n \n return sorted(station_dists.items(), key=lambda x: x[1])", "def near(self, meters):\n lng = self.position['coordinates'][0]\n lat = self.position['coordinates'][1]\n\n return Location.objects(position__near=[lng, lat], position__max_distance=meters)", "def nearest(coordinate, coordinate_list, limit=None):\r\n distances = []\r\n coordinate_lat=coordinate[0]\r\n coordinate_lon=coordinate[1]\r\n for c in coordinate_list:\r\n if len(c)==5:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[3][0], c[3][1]), c))\r\n else:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[0], c[1]), c)) \r\n distances.sort()\r\n if limit:\r\n return distances[:limit]\r\n return distances", "def search(location=DEFAULT_LOCATION, api_key=API_KEY):\n latitude, longtitude = location[0], location[1]\n url_params = {\"page\": \"1\", \"lon\": longtitude, \"lat\": latitude, \"distance\": \"5\"}\n\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)['result']['data']", "def find_closest_forecast_location(soup): \n\n links = soup.findAll(\"div\", {\"class\": \"info\"})\n \n location_list = []\n for link in links:\n try:\n href = text_between(str(link),'href=\"', '\"><em>')\n accu_codes = re.findall(r'\\(\\d+\\)', str(link))\n try:\n code = accu_codes[0]\n except Exception:\n code = '</em>'\n \n location = text_between(str(link),'<em>', code)\n location_list.append([location, href])\n \n except Exception:\n None\n \n if location_list == []:\n print(\"Forecast scraper needs attention\")\n \n home = find_lat_lon(\"Illinois\")\n lat1 = home[0]\n lon1 = home[1]\n 
\n distance_location_list = []\n distance_list = []\n \n for location in location_list:\n try:\n accu_location = find_lat_lon(str(location[0]))\n lat2 = accu_location[0]\n lon2 = accu_location[1]\n distance = float(coordinate_distance(lat1, lon1, lat2, lon2))\n distance_location_list.append([distance, location[0], location[1]])\n distance_list.append(distance)\n print(\"Checking distance of forecast location: \" + str(location[0]))\n \n except Exception:\n None\n \n closest_location_distance = min(distance_list)\n \n for place in distance_location_list:\n if place[0] == closest_location_distance:\n url = place[2]\n name = place[1]\n \n print(url, name, min(distance_list))\n return url", "def get_nearest(infected_coordinates, uninfected_coordinates, d):\n # Create tree from the GPS coordinates of uninfected users\n tree = BallTree(uninfected_coordinates, leaf_size=15, metric='haversine')\n indices,distances=tree.query_radius(infected_coordinates, r=d,return_distance=True)\n indices=indices.transpose()\n distances=distances.transpose()\n return indices,distances", "def knearest_amongst_user_rated( self, restaurant_id, user_id, k = 7, reg = 3.0 ):\n\t\tuser_rated = self.df[ self.df['user_id'] == user_id ]['business_id'].unique()\n\t\treturn self.knearest( restaurant_id = restaurant_id, \n\t\t\t\t\t\t\t set_of_restaurants = user_rated, k = k, reg = reg )", "def find(location, path):\n\n index = AnnoyIndex(64, metric='hamming')\n index.load('live/phash_index.ann')\n\n # get the requested image\n if location == 'url':\n MAX_DOWNLOAD = 15 * 1024 * 1024\n response = requests.get(path, stream=True)\n size = 0\n content = bytearray()\n for chunk in response.iter_content(1024):\n size += len(chunk)\n content += chunk\n if size > MAX_DOWNLOAD:\n raise ValueError\n img = Image.open(BytesIO(content))\n else:\n img = Image.open(path)\n\n # get the image's phash\n phash = imagehash.phash(img)\n phash_arr = phash.hash.flatten()\n\n # find the closest mateches\n results = index.get_nns_by_vector(phash_arr, 16, include_distances=True)\n\n conn = sqlite3.connect('live/twitter_scraper.db')\n c = conn.cursor()\n\n basenames = []\n tweet_ids = []\n\n # look up the location of the match and its tweet info\n first = True\n for idx, score in map(list, zip(*results)):\n if not first and score > 8:\n break\n first = False\n\n print('score: {}'.format(score))\n c.execute('SELECT path, filename FROM annoy WHERE idx=(?)', (idx,))\n dirname, basename = c.fetchone()\n fullpath = os.path.join(dirname, basename)\n c.execute('SELECT id FROM info WHERE filename=(?) 
AND path=(?)', (basename, dirname))\n tweet_id = c.fetchone()\n print(tweet_id)\n tweet_id = tweet_id[0]\n\n print('local path: {}'.format(fullpath))\n print('direct link: https://pbs.twimg.com/media/{}'.format(basename))\n print('source tweet: https://www.twitter.com/statuses/{}'.format(tweet_id))\n print()\n\n basenames.append(basename)\n tweet_ids.append(tweet_id)\n\n conn.close()\n return basenames, tweet_ids", "def nearest():\n try:\n text = find_stop_near(session[\"place\"])\n return render_template(\"place.html\", text = text)\n except:\n return render_template('error.html')", "def closest(centroids,coordinates):\n tup = [(cen[0], haversine(coordinates,cen[1])) for cen in centroids]\n distance = min(tup, key = lambda x:x[1])\n return (distance[0],coordinates)", "def test_get_nearest(self):\n switzerland = Country.objects.get(name=u\"Switzerland\")\n uk = Country.objects.get(name=u\"United Kingdom\")\n \n user1, person1 = self._create_person(\"user1\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.519582,\n longitude=6.632121,\n location_description=u\"Geneva\")\n # Geneva -> Saint-Genis: 10.9km\n user2, person2 = self._create_person(\"user2\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.205973,\n longitude=6.5995789,\n location_description=u\"Saint-Genis\")\n \n # Geneva -> Islington: 986km\n user3, person3 = self._create_person(\"user3\", \"[email protected]\",\n country=uk.name,\n latitude=51.532601866,\n longitude=-0.108382701874,\n location_description=u\"Islington\")\n \n # Geneva -> Lausanne: 63.2km\n user4, person4 = self._create_person(\"user4\", \"[email protected]\",\n country=switzerland.name,\n latitude=46.243572,\n longitude=6.02107,\n location_description=u\"Lausanne\")\n \n \n near = person1.get_nearest(within_range=9999)\n \n self.assertEqual(near, [person2, person4, person3])\n \n # the within range feature doesn't work in mysql\n if settings.DATABASE_ENGINE == 'mysql':\n return\n \n # person2: 21.7 miles\n # person4: 34.7 miles\n # person3: 471.9 miles\n near = person1.get_nearest(within_range=100)\n \n self.assertEqual(near, [person2, person4])\n \n near = person1.get_nearest(num=1, within_range=100)\n \n self.assertEqual(near, [person2])", "def test_nearest_location():\n locations = [(10, 20), (30, 40), (50, 60)]\n\n assert nearest_location(locations, 8) == 0\n assert nearest_location(locations, 15) == 0\n assert nearest_location(locations, 22) == 0\n\n assert nearest_location(locations, 28) == 1\n assert nearest_location(locations, 35) == 1\n assert nearest_location(locations, 42) == 1\n\n assert nearest_location(locations, 48) == 2\n assert nearest_location(locations, 55) == 2\n assert nearest_location(locations, 62) == 2", "def knearest( self, restaurant_id, set_of_restaurants, k = 7, reg = 3.0 ):\t\t\n\t\tsimilar = []\t\t\n\t\tfor other_rest_id in set_of_restaurants:\n\t\t\tif other_rest_id != restaurant_id:\n\t\t\t\tsim, n_common = self.get( other_rest_id, restaurant_id )\n\t\t\t\tsim = self.shrunk_sim( sim = sim, n_common = n_common, reg = reg )\n\t\t\t\tsimilar.append( ( other_rest_id, sim, n_common ) )\n\n\t\tsimilars = sorted( similar, key = itemgetter(1), reverse = True )\t\n\t\treturn similars[0:k]", "def location2station(location):\r\n # just forget it, use google\r\n location = quote(str(location))\r\n geo_url = 'http://maps.google.com/maps/geo?key=%s&q=%s&sensor=false&output=csv'%(API_KEY,location)\r\n point = map(float,urlopen(geo_url).readline().split(',')[-2:])\r\n best,result = 99999999,[]\r\n for row in 
rows():\r\n test_point = map(float, (row[2],row[3]))\r\n distance = ((test_point[0]-point[0])**2 + (test_point[1]-point[1])**2)**.5\r\n if distance < best:\r\n best,result = distance,row\r\n return tuple(result)", "def find_pubs(pub_csv, postcode_csv, \n current_location, max_return=20, max_distance=50):\n \n list_of_pubs = []\n for pub in Pub.lookup_pubs(pub_csv, postcode_csv):\n # Compute distance\n distance = pub.location.distance_from(current_location)\n if distance < max_distance:\n # Are we in range?\n if len(list_of_pubs) < max_return:\n # How much stuff do we want print?\n list_of_pubs.append((pub.name, distance))\n list_of_pubs = sorted(list_of_pubs, key=lambda x:x[1])\n \n elif distance < list_of_pubs[-1][1]:\n # is distance smaller than last entry?\n # we can save a sort if not\n list_of_pubs.append((pub.name, distance))\n list_of_pubs = sorted(list_of_pubs, key=lambda x:x[1])[:-1]\n \n return list_of_pubs", "def getRecommendations_movie(userID,nearest,pivotvalues,neighborCount=5,moviesToRecommend=10):\n\tmovierecs = np.zeros(len(nearest))\n\tarr = pivotvalues[userID,:]\n\twatched = []\n\tfor i in range(len(arr)):\n\t if arr[i]>0:\n\t watched.append(i)\n\t for j in range(neighborCount):\n\t movierecs[nearest[i][j]] = movierecs[nearest[i][j]] + arr[i]\n\trecommend = movierecs.argsort()[0:moviesToRecommend]\t\n\trecommend = np.setdiff1d(recommend,watched)\n\treturn recommend[0:moviesToRecommend]", "def query_restaurants_by_location(collection, radius, lat, lon):\n results = collection.find(\n {'location': {'$nearSphere': {'$geometry': {'type': \"Point\",\n 'coordinates': [float(lon), float(lat)]},\n '$maxDistance': radius}}}, {\"_id\": 0})\n\n return results", "def get_near_cities_from_user_coordinates(user_coordinates):\n data = pandas.read_csv('city_coordinates.tsv', sep='\\t')\n cities = data['city_ascii']\n latitudes, longitudes = data['lat'], data['lng']\n distance_list = []\n for city, lat, lng in zip(cities, latitudes, longitudes):\n try:\n distance = geodesic((lat, lng), user_coordinates).km\n distance_list.append(((lat, lng), city, distance))\n except Exception:\n continue\n distance_list_sorted = sorted(distance_list, key=lambda x: x[-1])\n return [elem[-2] for elem in distance_list_sorted[:100]]", "def search(latit, longit, dist, num_results):\n API_PRIVATE = os.environ.get(\"TOM_TOM_PRIVATE\")\n apiParameters = {\n 'key': API_PRIVATE,\n 'typeahead': True,\n 'limit': num_results,\n 'ofs': 0,\n 'countrySet': 'US',\n 'lat': latit,\n 'lon': longit,\n 'radius': dist,\n 'categorySet': '9361023, 7332005, 9361066, 9361051, 9361009'\n }\n apiQuery = str('https://api.tomtom.com/search/2/categorySearch/.json');\n\n response = requests.get(apiQuery, params=apiParameters)\n while True:\n try:\n jsonResponse = response.json()\n break\n except:\n response = requests.get(apiQuery, params=apiParameters)\n\n latitude_lst = []\n longitude_lst = []\n for eachStore in jsonResponse['results']:\n latitude_lst.append(eachStore['position']['lat'])\n longitude_lst.append(eachStore['position']['lon'])\n final_lat = []\n final_lon = []\n for i in range(len(latitude_lst)):\n repeat = False\n for j in range(len(final_lat)):\n if final_lat[j] == latitude_lst[i] and final_lon[j] == longitude_lst[i]:\n repeat = True\n break\n if repeat == False:\n final_lat.append(latitude_lst[i])\n final_lon.append(longitude_lst[i])\n return final_lat, final_lon", "def find_closest_patients(self,max_dist):\n\n centre_loc = self.location\n pats = User.objects.filter(assigned_centre_id__lt=1)\n\n plocs = 
np.zeros((len(pats), 2), dtype=np.float32)\n pids = np.zeros(len(pats), dtype=int)\n for idx,pat in enumerate(pats):\n plocs[idx,:] = pat.location\n pids[idx] = (pat.id)\n\n dists = np.linalg.norm(centre_loc - plocs, ord=2, axis=-1)\n in_range = (dists <= max_dist)\n\n return pids[in_range].tolist()", "def nearest_neighbor_within(others, point, max_distance):\n search_region = point.buffer(max_distance)\n interesting_points = search_region.intersection(MultiPoint(others))\n \n if not interesting_points:\n closest_point = None\n elif isinstance(interesting_points, Point):\n closest_point = interesting_points\n else: \n distances = [point.distance(ip) for ip in interesting_points\n if point.distance(ip) > 0]\n closest_point = interesting_points[distances.index(min(distances))]\n \n return closest_point" ]
[ "0.734112", "0.6164199", "0.60423326", "0.5946243", "0.59105843", "0.57418096", "0.5691708", "0.5665888", "0.5598716", "0.5578369", "0.5567686", "0.5564117", "0.5545929", "0.5539266", "0.55250674", "0.55057204", "0.5500532", "0.54814434", "0.54331714", "0.5421165", "0.54205436", "0.54175407", "0.54042214", "0.5359512", "0.535485", "0.53478587", "0.533623", "0.53352493", "0.5322688", "0.5301148" ]
0.7052365
1
If East is in the same guild, Talos will ask them a favor... Otherwise, Talos isn't doing it
async def favor(self, ctx): east = ctx.guild.get_member(339119069066297355) if not east or east.status != discord.Status.online: await ctx.send(f"I'm afraid I can't do that, {ctx.author.display_name}.") return await ctx.send("&East, could I ask you for a favor? I need someone to verify my code.") await asyncio.sleep(2) async with ctx.typing(): await asyncio.sleep(1) await ctx.send("Oh my. Well, if you insist ;)")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def omartrifacta(self, ctx):\n user_member1 = await ctx.guild.fetch_member(\"142084729674399745\")\n user_member2 = await ctx.guild.fetch_member(\"197784087476305921\")\n user_member3 = await ctx.guild.fetch_member(\"219969018369409024\")\n if user_member1 is not None and user_member2 is not None and user_member3 is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member1.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member2.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member3.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "async def _guild(self, ctx):\n if await self.config.guild(ctx.guild).guild():\n await self.config.guild(ctx.guild).guild.set(False)\n msg = _(\"Okay, I will not react to messages \" \"containing server emojis!\")\n await ctx.send(msg)\n else:\n await self.config.guild(ctx.guild).guild.set(True)\n msg = _(\"Okay, I will react to messages \" \"containing server emojis!\")\n await ctx.send(msg)", "async def isspecial(self, ctx):\n if ctx.guild.id in config['config']['special_servers']:\n await ctx.send(embed=discord.Embed(\n title='**Yes!**',\n description=\"Your server is in my special servers list!\",\n colour=discord.Color.green()\n )\n )\n else:\n await ctx.send(embed=discord.Embed(\n title='**No!**',\n description=\"Your server isn't in my special servers list!\",\n colour=discord.Color.red()\n )\n )", "def guild_only():\n\n async def check(ctx):\n if ctx.guild: # In a server\n return True\n await ctx.send('This command is only available in servers!')\n return False\n\n return commands.check(check)", "async def tip(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id) and ' '.join(args).lower() == 'fedora':\n user = User.objects.filter(Q(name__icontains=ctx.author.name) | Q(nick__icontains=ctx.author.name))\n if user:\n if user[0].equipment_slots[0] == FEDORA:\n await ctx.send(f'*{ctx.author.name} tips their fedora.*')", "async def guild(ctx):\n print(ctx.channel)\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"guild: {ctx.guild.name}\"\"\")", "async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")", "async def _guilds(self, ctx):\n all_guilds = sorted(list(ctx.bot.guilds), key=lambda s: s.name.lower())\n msg = \"Currently in these servers.\\n\"\n responses = []\n async with ctx.typing():\n for i, guild in enumerate(all_guilds, 1):\n msg += f\"{i}: ``{guild.name}`` ({guild.id})\\n\"\n responses.append(str(i))\n query = await ctx.send(\"To leave a server, just type its number.\")\n await ctx.send(msg)\n\n def pred(m):\n return True if m.author.id == ctx.message.author.id and m.content in responses else False\n\n try:\n msg = await ctx.bot.wait_for(\"message\", check=pred, timeout=15)\n if guild.owner.id == ctx.bot.user.id:\n return await ctx.send(\"I cannot leave a guild I am the owner of.\")\n except asyncio.TimeoutError:\n await query.delete()\n return await 
ctx.send(\"message timed out.\")\n\n guild_leave = await ctx.send(f\"Are you sure you want me to leave {guild.name}? (yes/no)\")\n def pred2(m):\n return True if m.author == ctx.author and m.content == \"yes\" else False\n try:\n pred = await self.bot.wait_for(\"message\", check=pred2, timeout=15)\n if pred.result is True:\n await guild.leave()\n if guild != ctx.guild:\n await ctx.send(\"Done.\")\n else:\n await ctx.send(\"Alright then.\")\n except asyncio.TimeoutError:\n await guild_leave.delete()\n await query.delete()\n await ctx.send(\"Response timed out.\")", "async def otherdiscords(self, ctx: commands.Context, team: HockeyTeams) -> None:\n if team is None:\n return await ctx.send(_(\"You must provide a valid current team.\"))\n if team not in [\"all\"]:\n await ctx.send(TEAMS[team][\"invite\"])\n else:\n if not ctx.channel.permissions_for(ctx.message.author).manage_messages:\n # Don't need everyone spamming this command\n return\n atlantic = [team for team in TEAMS if TEAMS[team][\"division\"] == \"Atlantic\"]\n metropolitan = [team for team in TEAMS if TEAMS[team][\"division\"] == \"Metropolitan\"]\n central = [team for team in TEAMS if TEAMS[team][\"division\"] == \"Central\"]\n pacific = [team for team in TEAMS if TEAMS[team][\"division\"] == \"Pacific\"]\n team_list = {\n \"Atlantic\": atlantic,\n \"Metropolitan\": metropolitan,\n \"Central\": central,\n \"Pacific\": pacific,\n }\n msg1 = _(\n \"__**Hockey Discord Master List**__\\n```fix\\n\"\n \"- Do not join other discords to troll.\\n- \"\n \"Respect their rules & their members \"\n \"(Yes even the leafs & habs unfortunately).\\n- \"\n \"We don't control the servers below. \"\n \"If you get banned we can not get you unbanned.\\n- \"\n \"Don't be an asshole because then we all look like assholes. \"\n \"They won't see it as one asshole \"\n \"fan they will see it as a toxic fanbase.\\n- \"\n \"Salt levels may vary. Your team is the best \"\n \"here but don't go on another discord and preach \"\n \"it to an angry mob after we just won.\\n- \"\n \"Not following the above rules will result in \"\n \"appropriate punishments ranging from a warning \"\n \"to a ban. 
```\\n\\nhttps://discord.gg/reddithockey\"\n )\n eastern_conference = \"https://i.imgur.com/CtXvcCs.png\"\n western_conference = \"https://i.imgur.com/UFYJTDF.png\"\n async with self.session.get(eastern_conference) as resp:\n data = await resp.read()\n logo = BytesIO()\n logo.write(data)\n logo.seek(0)\n image = discord.File(logo, filename=\"eastern_logo.png\")\n await ctx.send(msg1, file=image)\n for division in team_list:\n if division == \"Central\":\n async with self.session.get(western_conference) as resp:\n data = await resp.read()\n logo = BytesIO()\n logo.write(data)\n logo.seek(0)\n image = discord.File(logo, filename=\"western_logo.png\")\n await ctx.send(file=image)\n div_emoji = \"<:\" + TEAMS[\"Team {}\".format(division)][\"emoji\"] + \">\"\n msg = \"{0} __**{1} DIVISION**__ {0}\".format(div_emoji, division.upper())\n await ctx.send(msg)\n for team in team_list[division]:\n team_emoji = \"<:\" + TEAMS[team][\"emoji\"] + \">\"\n team_link = TEAMS[team][\"invite\"]\n msg = \"{0} {1} {0}\".format(team_emoji, team_link)\n await ctx.send(msg)", "async def iam(self, ctx, *, rank):\n\t\twith open(\"cogs/utils/servers.json\") as f:\n\t\t\tdata = json.load(f)\n\t\tif rank in data[str(ctx.guild.id)][\"ranks\"]:\n\t\t\trole = discord.utils.find(lambda m: rank.lower() in m.name.lower(), ctx.guild.roles)\n\t\t\tif role in ctx.author.roles:\n\t\t\t\tawait ctx.author.remove_roles(role)\n\t\t\t\tawait ctx.send('You already had that rank, so I went ahead and removed it. Just repeat the command to get it back at anytime.')\n\t\t\telse:\n\t\t\t\tawait ctx.author.add_roles(role)\n\t\t\t\tawait ctx.send(f\"I gave you the {rank} rank\")\n\t\telse:\n\t\t\tawait ctx.send(\"That rank does not exist\")", "async def is_guild_shortcut_name(argument, context, verbose=True):\n return await is_shortcut_name(argument, context, \"guild\", verbose)", "async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950", "async def team_unignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')", "async def cog_check(self, ctx: Context) -> bool: # type: ignore[override]\n\n return ctx.guild is not None", "async def botserver(self, ctx):\n if isinstance(ctx.channel, discord.DMChannel) or ctx.guild.id != 749595288280498188:\n return await ctx.send(f\"**Here you go {ctx.author.name} 🍻\\n<{self.config.botserver}>**\")\n\n await ctx.send(f\"**{ctx.author.name}** this is my home you know :3\")", "async def is_not_guild_shortcut_name(argument, context, verbose=True):\n return await is_not_shortcut_name(argument, context, \"guild\", verbose)", "async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! 
You do not have permission to do that.')", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True", "def setup_not_complete():\n\n async def predicate(ctx:vbu.Context):\n if await fetch_guild_settings(ctx):\n raise CheckFailure('Your server has already been set up.')\n return True\n return commands.check(predicate)", "def is_bot(self) -> bool:", "async def team_whoami(self, ctx: commands.Context):\n team_id = await self.config.user(ctx.author).team_id()\n\n if team_id == -1:\n await ctx.send('You are not affiliated with any team.')\n return\n if team_id not in self.teams:\n await ctx.send('You are affiliated with a team whose ID I do not'\n f'recognize. ({team_id})')\n return\n await ctx.send('You are affiliated with Team **%s**.' % (\n self.teams[team_id].display_name),)", "async def on_guild_unavailable(self, guild: discord.Guild):\n if guild.id != RushGuild.id:\n return\n\n self._guild_available.clear()", "async def on_message(message):\n # Check the message has been sent by a user, to stop the bot replying to itself\n if message.author == client.user:\n return\n if message.content.startswith('!assign'):\n # Use a regex function to search for an email address in the message\n try:\n email_address = re.search(r'[\\w\\.-]+@[\\w\\.-]+', message.content).group()\n except AttributeError:\n email_address = None\n\n if email_address:\n # Find the premium role\n premium_role = [role for role in client.guilds[0].roles if role.name.lower() == 'premium'][0]\n user_roles = message.author.roles\n # Check whether or not the user has the premium role\n if premium_role in user_roles:\n # Alert the user that they are already premium\n await message.channel.send(f\"{message.author.mention} You are already a premium member!\")\n else:\n # Add the email and username to the spreadsheet\n sheet_response = write_to_sheet(email_address, message.author._user.name)\n if sheet_response == \"Email and/or username already registered!\":\n await message.channel.send(f\"{message.author.mention} {sheet_response}\")\n else:\n # Upgrade the user role to premium\n await message.author.add_roles(premium_role)\n await message.channel.send(f\"{message.author.mention} {sheet_response}Welcome to premium!\")\n else:\n # Mention the user and prompt them to try again with a valid email\n no_email_msg = \"No email found! 
Please try again with a valid email.\"\n await message.channel.send(\n f\"{message.author.mention} {no_email_msg}\"\n )\n # Delete the user's initial message\n await message.delete()\n elif message.content.startswith('!stocks'):\n if message.author.guild_permissions.administrator is True:\n # Retrieve the current stocks\n trending = check_stocks(stocks_list)\n stocks_response = ''.join([\n f\"__**{key}**__:\\n{', '.join(item)}\\n\\n\" if item else f\"__**{key}**__:\\n{None}\\n\\n\" for key, item in trending.items()\n ])\n await message.channel.send(stocks_response)\n else:\n await message.channel.send(f'Sorry {message.author.mention}, only the server admin can call that function!')\n # Delete the user's initial message\n await message.delete()", "async def team_ignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(True)\n await ctx.send('Okay, I won\\'t DM about this anymore.')", "async def leaveserver(self, ctx, guild: int):\n guild = self.bot.get_guild(guild)\n await guild.leave()\n embed = discord.Embed(title=f\"left {guild.name} owned by: {guild.owner.name}\")\n embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def fountain_on_location(game, loc):\n my_fountains = game.get_my_mana_fountains()\n for fountain in my_fountains:\n if fountain.location.equals(loc):\n return True\n return False", "async def guild_infected(self, ctx, *, guild: discord.Guild = None):\n if not guild:\n guild = ctx.guild\n user_list = await self.config.all_users()\n infected_list = []\n for user, data in user_list.items():\n user = guild.get_member(user)\n if user:\n userState = data[\"gameState\"]\n if userState == \"infected\":\n infected_list.append(f\"{user.mention} - {user}\")\n if infected_list:\n infected_list = \"\\n\".join(infected_list)\n color = await ctx.embed_color()\n if len(infected_list) > 2000:\n embeds = []\n infected_pages = list(pagify(infected_list))\n for index, page in enumerate(infected_pages, start=1):\n embed = discord.Embed(color=color, title=\"Infected Members\", description=page)\n embed.set_footer(text=f\"{index}/{len(infected_pages)}\")\n embeds.append(embed)\n await menu(ctx, embeds, DEFAULT_CONTROLS)\n else:\n await ctx.send(\n embed=discord.Embed(\n color=color,\n title=\"Infected Members\",\n description=infected_list,\n )\n )\n else:\n await ctx.send(\"No one has been infected yet..\")", "async def on_ready():\r\n for each_guild in client.guilds:\r\n if each_guild.name == PRIMARY_GUILD_NAME:\r\n print(\"Locked In 😎\\n\") # we are where we want to be\r\n elif each_guild.name == TESTING_GUILD_NAME:\r\n print(f\"{client.user} is connected to {each_guild.name}, which is recognized as a Testing \"\r\n f\"Guild\\n\")\r\n else:\r\n print(\"Name's didn't match 🤔\")\r\n print(f'{client.user} has successfully connected to {each_guild.name}! 😁\\n')\r\n await client.change_presence(activity=discord.Game('RDO - Wagon Stealing')) # sets the bots Activity\r", "async def optout(self, ctx):\n optout.insert_one({\"_id\": ctx.author.id})\n await ctx.send(f\"You have **opted out** of A Sound Mood. To join the program again, use ?optin.\")" ]
[ "0.59104276", "0.5812901", "0.5811559", "0.57705164", "0.568164", "0.56639165", "0.5625076", "0.55851394", "0.5549394", "0.5538837", "0.54885685", "0.5444899", "0.54436094", "0.5433115", "0.5410844", "0.54084325", "0.53876704", "0.5387098", "0.53671765", "0.5336776", "0.5330393", "0.5330348", "0.53201145", "0.53107643", "0.5285688", "0.52633554", "0.52501345", "0.52426183", "0.52269065", "0.52256864" ]
0.7293132
0
Gets an XKCD comic with the given number, or the current one if one isn't specified, and displays it.
async def xkcd(self, ctx, comic: int = 0):
    if comic < 0:
        await ctx.send("Requested XKCD can't be negative")
        return
    data = await self.bot.session.get_xkcd(comic or None)
    if data is None:
        await ctx.send(f"No comic for ID `{comic}` found")
        return
    title = data["title"]
    img = data["img"]
    alt = data["alt"]
    if self.bot.should_embed(ctx):
        with dutils.PaginatedEmbed() as embed:
            embed.title = title
            embed.set_image(url=img)
            embed.set_footer(text=alt)
            embed.timestamp = dt.datetime(year=int(data["year"]), month=int(data["month"]), day=int(data["day"]))
        await ctx.send(embed=embed)
    else:
        img_data = discord.File(data["img_data"], filename=data["filename"])
        await ctx.send("**" + title + "**\n" + alt, file=img_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_xkcd(self, ctx, number = \"random\"):\n if not self.module_check(ctx): return\n if number == \"latest\":\n r = requests.get('https://xkcd.com/info.0.json')\n elif number == \"random\":\n r = requests.get('https://xkcd.com/info.0.json')\n r = json.loads(r.text)\n random_xkcd = random.randint(0, r['num'])\n r = requests.get('http://xkcd.com/{}/info.0.json'.format(random_xkcd))\n else:\n r = requests.get('http://xkcd.com/{}/info.0.json'.format(number))\n if r.status_code == 404:\n await self.bot.say(\"Invalid xkcd number.\")\n return\n r = json.loads(r.text)\n embed = discord.Embed(title=r['safe_title'], description=r['alt'], url=\"https://xkcd.com/{}\".format(r['num']))\n embed.set_image(url=r['img'])\n await self.bot.say(embed=embed)", "async def xkcd(self, ctx, num:int = -1):\r\n if (num < 0):\r\n webpage = \"https://c.xkcd.com/random/comic/\"\r\n else:\r\n try:\r\n webpage = \"https://xkcd.com/\" + str(num)\r\n\r\n r = requests.get(webpage).text\r\n soup = BeautifulSoup(r, 'lxml')\r\n title = soup.find_all(\"div\", {\"id\": \"ctitle\"})[0]\r\n comic = soup.find_all(\"div\", {\"id\": \"comic\"})[0]\r\n img = comic.find_all(\"img\")[0]\r\n await ctx.send(title.text)\r\n await ctx.send(\"https:\" + img.get('src'))\r\n return\r\n\r\n except IndexError:\r\n webpage = \"https://xkcd.com/\"\r\n\r\n r = requests.get(webpage).text\r\n soup = BeautifulSoup(r)\r\n title = soup.find_all(\"div\", {\"id\": \"ctitle\"})[0]\r\n comic = soup.find_all(\"div\", {\"id\": \"comic\"})[0]\r\n img = comic.find_all(\"img\")[0]\r\n await ctx.send(title.text)\r\n await ctx.send(\"https:\" + img.get('src'))", "async def xkcd(self, ctx, comic_id=None):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n if comic_id is None:\n url = \"https://c.xkcd.com/random/comic/\"\n response = requests.get(url, headers={\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Connection\": \"keep-alive\",\n \"Referer\": \"https://xkcd.com/\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0\"})\n location = response.url\n else:\n location = f\"https://xkcd.com/{comic_id}/\"\n await ctx.send(location)", "def get_xkcd() -> str:\n if xkcd == 'true':\n r = requests.get(\"https://xkcd.com/info.0.json\").json()\n img = r['img']\n\n return f'<a href=\"https://xkcd.com/\">\\n <img src=\"{img}\" />\\n</a>'\n\n return \"\"", "def get_xkcd_comic_meta():\n # get random comic if from 1 to last comic on now\n comic_id = random.randint(1, cf.get_last_xkcd_num())\n\n url = f'http://xkcd.com/{comic_id}/info.0.json'\n response = requests.get(url)\n cf.raise_response_errors(response)\n\n comic_meta = response.json()\n\n return comic_meta", "def getComic(handle):\n result = self.db.query('select from comics where handle = %s limit 1', handle)\n if len(result) < 1:\n return None\n else:\n return result[0]", "def disp_found(num):\n from x84.bbs import getterminal, echo\n term = getterminal()\n echo(u''.join((u'\\r',\n term.bold_white(u'%d' % (num,)),\n term.yellow(u' lOCAtiON%s diSCOVEREd ' %\n (u's' if num > 1 else u'')),\n term.bold_black(u'...'),)))", "def show_digit(digit):\n\n # Create a window for the digit. The digit is 14x14, so create a window \n # which is 150x150. 
We'll leave a border of 5 pixels, and each digit\n # \"pixel\" will be 10x10\n\n master = Tk()\n\n canvas = Canvas(master, width=150, height=150)\n canvas.pack()\n\n # Draw a rectange for each pixel in the digit\n for i in range(14):\n y = 10*i + 5\n for j in range(14):\n x = 10*j + 5\n \n\n # Determine the hex value of this pixel color\n pixel_value = digit[14*i + j]\n pixel_hex = hex(int(pixel_value*255)).replace('0x','')\n pixel_hex = '#' + pixel_hex + pixel_hex + pixel_hex\n \n # Draw the rectangle\n canvas.create_rectangle(x, y, x+10, y+10, fill=pixel_hex)\n\n # Done!\n return canvas", "def circled_number(number, bold_circle=True):\n if number <= 0:\n raise ValueError()\n elif number < 10:\n return chr((0x2775 if bold_circle else 0x245f) + number)\n elif number < 21 and not bold_circle:\n return chr(0x245f + number)\n elif number == 10 and bold_circle:\n return chr(0x277f)\n elif number < 21 and bold_circle:\n return chr(0x24e0 + number)\n elif bold_circle:\n return '[%s]' % (number,) # raise ValueError()\n elif number < 30:\n return chr(0x323c + number)\n elif number == 30:\n return chr(0x325a)\n elif number < 36:\n return chr(0x323c + number)\n elif number < 51:\n return chr(0x328d + number)\n else:\n return '(%s)' % (number,) # raise ValueError()", "async def cmd_xkcd(\n self, args: Args, src: Src, _explain: int = None, _e: int = None, **_\n ):\n ex = _explain if _explain is not None else _e\n if ex is not None:\n return \"This is what XKCD #{0} means:\\n<https://www.explainxkcd.com/wiki/index.php?title={0}>\".format(\n ex\n )\n\n try:\n indexresp = json.loads(\n requests.get(\"http://xkcd.com/info.0.json\").content.decode()\n )\n except requests.exceptions.ConnectionError:\n return \"XKCD did not return a valid response. It may be down.\"\n except ValueError as e:\n return \"XKCD response was missing data. Try again. [{}]\".format(str(e))\n\n if args:\n try:\n target_number = int(args[0])\n\n except ValueError:\n return \"You must enter a **number** for a custom xkcd\"\n else:\n if int(target_number) == 404:\n return \"Don't be that guy\"\n\n else:\n number = indexresp[\"num\"]\n target_number = randint(0, number)\n while target_number == 404:\n target_number = randint(0, number)\n\n try:\n if target_number != 0:\n resp = json.loads(\n requests.get(\n \"http://xkcd.com/{0}/info.0.json\".format(target_number)\n ).content.decode()\n )\n else:\n resp = json.loads(\n requests.get(\"http://xkcd.com/info.0.json\").content.decode()\n )\n\n except requests.exceptions.ConnectionError:\n return \"XKCD did not return a valid response. It may be down.\"\n except ValueError as e:\n return \"XKCD response was missing data. Try again. 
[{}]\".format(str(e))\n\n embed = (\n discord.Embed(color=0x96A8C8)\n .set_image(url=resp[\"img\"])\n .set_author(\n name=\"XKCD #{}: {}\".format(resp[\"num\"], resp[\"safe_title\"]),\n url=\"https://www.xkcd.com/{}\".format(resp[\"num\"]),\n icon_url=\"https://is1-ssl.mzstatic.com/image/thumb/Purple128/v4/e0/a4/67/e0a467b3-dedf-cc50-aeeb-2efd42bb0386/source/512x512bb.jpg\",\n )\n .set_footer(text=resp[\"alt\"])\n )\n\n await self.client.embed(src.channel, embed)", "async def xkcd(self, ctx: Context) -> None:\n try:\n result: dict = await self.xkcd_svc.get_xkcd_comic(ctx)\n embed = self.xkcd_svc.create_embed_comic(result)\n\n await ctx.send(embed=embed)\n except Exception as ex:\n print(ex)", "def get_info():\r\n app = application.Application()\r\n\r\n app.start(r\"C:\\\\AL50022\\\\Circ\\\\bin\\\\Circ.exe\")\r\n\r\n app.Circ.menu_select(\"View\")", "def Cnum(self, default=None):\n return self.data.get('cnum', default)", "def Cnum(self, default=None):\n return self.data.get('cnum', default)", "def show_bingo_number_on_display(number):\n global sending, selected_mode\n if sending:\n return\n if not selected_mode == Mode.BINGO:\n sending = True\n MicroController.get_instance().send_one_byte(254)\n time.sleep(0.01)\n sending = False\n selected_mode = Mode.BINGO\n MicroController.get_instance().send_one_byte(number)", "def cget(self, key):\n return self._widget_cget(key, cook=False)", "def display_c(c, font, screen, lcd, size=5, x=0, y=0):\n char = font[str(c)]\n width, height = char.size\n \"\"\"\n if not(size == 10):\n size /= 10.0\n width = int(round(size*width))\n height = int(round(size*height))\n char.resize((width,height))\n \"\"\"\n size = int(round(size * 10))\n images.display_img(char,screen,lcd,size,x,y)", "def xkcd():", "def comics_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=COMIC_TYPE_URI,\n rdf_type_name=COMIC_TYPE_NAME, \n kls=Comic)", "def getCurrencySymbol(id=None):", "def __getChemDrawCmd(self):\n return self.__getCmd(\"JUMBO_HOME\", \"cdx2cml\", \"ChemDraw to CML\")", "def draw_number(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n if(x>0):\n while(x>0):\n print(\"+\",end='')\n x = x-1\n print(\"\",end=\"\\n\")\n if(x<0):\n while(x<0):\n print(\"-\",end='')\n x = x+1\n print(\"\",end=\"\\n\")", "def display_number_with_default(self):\r\n if self.display_coursenumber:\r\n return self.display_coursenumber\r\n\r\n return self.number", "def select_dispenser(id=1, timeout=default_timeout):\n return click_key(controls['Fuel']['prepay_dispenser_by_id'] % id, timeout=timeout)", "def display_number(com,count):\n print \"NUM: \", count\n try:\n if count > 999:\n count = 999\n safenum=str(int(count))\n #com = serial.Serial(config.devnum, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.num['display']+safenum+config.num['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "def getIcon(self, bVal):\n\t\tif bVal:\n\t\t\treturn u\"%c\" %(CyGame().getSymbolID(FontSymbols.POWER_CHAR) + 14)\n\t\telse:\n\t\t\treturn u\"%c\" %(CyGame().getSymbolID(FontSymbols.POWER_CHAR) + 15)", "def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))", "def get_k(self):\n kidx = self.kComboBox.currentIndex()\n self.k = int(self.klist[kidx])", "def print_number():\n\tfavorite_number = fetch_number()\n\tprint(f\"Your favorite number is {favorite_number}!\")", "def 
cget2(self, key, **kw):\n kw.update(cook=True)\n return self._widget_cget(key, **kw)" ]
[ "0.68028086", "0.61957276", "0.59368753", "0.5861915", "0.56918675", "0.55075777", "0.53484565", "0.5283768", "0.52726424", "0.51808345", "0.51591676", "0.50742537", "0.50456804", "0.50456804", "0.5033829", "0.5017756", "0.5013748", "0.50132114", "0.49857825", "0.49768993", "0.49649996", "0.49292478", "0.48531255", "0.48347273", "0.48082", "0.48050892", "0.4747858", "0.47475287", "0.47245908", "0.47134033" ]
0.64772284
1
Sets up the JokeCommands extension. Adds the JokeCommands cog to the bot
def setup(bot):
    bot.add_cog(JokeCommands(bot))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(bot):\n new_cog = Commands(bot)\n bot.add_cog(new_cog)", "def setup(bot):\n bot.logger.debug(\n 'Registering extension \"Quiz\"'\n )\n bot.add_cog(QuizCog(bot))", "def setup(bot):\n bot.add_cog(AdminCommands(bot))", "def setup(bot):\n @bot.event\n async def on_command_error(ctx, error):\n if isinstance(error, TimeoutError):\n return\n print(error)\n\n @bot.event\n async def on_ready():\n print('Logged in as:\\n{0} (ID: {0.id})'.format(bot.user))\n bot.load_extension('cogs.music')\n bot.load_extension('cogs.utility')\n bot.load_extension('cogs.general')\n bot.load_extension('cogs.fun')\n await bot.change_presence(activity=discord.Game(Bot.PREFIX+\"help\"))", "def setup(bot: KingArthur) -> None:\n bot.add_cog(Ed(bot))", "def setup(bot):\n bot.add_cog(MyAnimeList())", "async def setup(bot):\n await bot.add_cog(WordleCmd(bot))", "def setup(bot):\n bot.add_cog(DBCog(bot))", "def setup(bot):\n bot.add_cog(LookAtMe())", "def setup(bot: commands.Bot) -> None:\n bot.add_cog(AdventOfCode(bot))", "def setup(bot: commands.Bot) -> None:\n bot.add_cog(DragNames(bot))", "def setup(bot):\n bot.add_cog(Help(bot))", "async def setup(bot):\n await bot.add_cog(People(bot))", "async def setup(bot: Bot) -> None:\n await bot.add_cog(Utils(bot))", "def setup(bot):\n bot.add_cog(Queue(bot))", "def setup(bot: util.CustomBot):\r\n bot.add_cog(Info(bot))", "def setup(bot: Bot) -> None:\n bot.add_cog(Help(bot))", "def setup(bot: Red):\n bot.add_cog(Welcome(bot))", "def setup(bot):\n bot.add_cog(Info(bot))", "def setup(bot: Bot) -> None:\n bot.add_cog(Armory(bot))", "def setup_commands(bot):\n # Reset the bot's command setup\n bot.reset_commands()\n # Load enabled mods\n for mod in bot.enabled_mods:\n try:\n full = 'mod_%s' % mod\n m = getattr(__import__('mods.%s' % full), full)\n except Exception:\n bot.log(ERROR, 'Importing the %s mod failed!' % mod)\n sys.excepthook(*sys.exc_info())\n continue\n\n try:\n bot.installed_mods[mod] = m\n # Check for a 404 handler, and replace the current one if there is\n p404 = getattr(m, 'handle_404', None)\n if p404:\n bot.cb_404 = p404\n\n # Check for a setup function, and run it if there is\n setup = getattr(m, 'setup', None)\n if setup:\n setup(bot)\n\n # Required command bank\n for cmd in m.command_bank:\n # Get the actual function\n func = getattr(m, cmd)\n # Get the args for the command\n data = m.command_bank[cmd]\n # If data[0] is true, mod_help will recognize this command\n if data[0]:\n bot.help_db[data[1]] = parse_help(func)\n # Get the main name and aliases inserted\n for alias in data[1:]:\n bot.command_db[alias] = func\n\n # Helper function for optional nameless multiples\n def add_optional(olist, name):\n olist.extend(getattr(m, f) for f in getattr(m, name, ()))\n\n # Optional filters are loaded and added to the list\n add_optional(bot.filters, 'filters')\n\n # Ditto for time-cycle callbacks\n add_optional(bot.periodic_cbs, 'periodic')\n\n # Handlers are the same, but structured as a dict with\n # \"type\": \"single function-name\" items\n handlers = getattr(m, 'handlers', None)\n if handlers:\n for cbtype in handlers:\n bot.handlers[cbtype].append(getattr(m, handlers[cbtype]))\n\n # Register any requirements\n # NOTE: By putting this at the end, we avoid the possibility of\n # getting fake requires.\n reqs = getattr(m, 'requires', None)\n if reqs:\n bot.required_mods.update(reqs)\n except Exception:\n bot.log(ERROR, 'Unable to install the %s mod!' 
% mod)\n del bot.installed_mods[mod]\n sys.excepthook(*sys.exc_info())\n\n missing = bot.required_mods - set(bot.installed_mods)\n if missing:\n raise MissingRequirementsError(missing)\n\n # And now for the post-install triggers.\n for mod, m in bot.installed_mods.items():\n post = getattr(m, 'post_prepare', None)\n if post:\n try:\n post(bot)\n except Exception:\n bot.log(ERROR, 'Unable to post-prepare the %s mod!' % mod)\n sys.excepthook(*sys.exc_info())", "def setup(bot):\n bot.add_cog(ReactionRoles(bot))", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def setup(bot):\n bot.add_cog(Session(bot))", "async def hockey_commands(self, ctx: commands.Context) -> None:\n pass", "def setup(bot):\n bot.add_cog(TruthOrDareCmd(bot))", "def setup(bot: commands.Bot) -> None:\n bot.add_cog(CommandErrorHandler(bot))", "def setup(bot):\n bot.add_cog(Miniscape(bot))", "def setup(bot):\n bot.add_cog(EmailAddressCRUD(bot))", "async def extensions(ctx):\n if ctx.invoked_subcommand is None:\n embed = Embed(\n title=\"Extensions\",\n description=\"The following extensions are loaded:\",\n colour=bot.colors['default']\n )\n for k, v in bot.cogs.items():\n embed.add_field(\n name=k,\n value=v.description,\n inline=False)\n await ctx.channel.send(embed=embed)" ]
[ "0.69343936", "0.68088084", "0.66989225", "0.65883654", "0.65673536", "0.65430915", "0.65312517", "0.64955974", "0.64893925", "0.6323563", "0.63162094", "0.6146145", "0.6126287", "0.6121231", "0.6090291", "0.60819864", "0.60638237", "0.60156745", "0.5999061", "0.59527", "0.59180844", "0.59154725", "0.5913398", "0.59120077", "0.5786205", "0.57792544", "0.568034", "0.5658734", "0.5649435", "0.56187165" ]
0.7969974
0
A method to get interpolated weight spectrum for a given spw and row id. Should be implemented in child class
def _get_interpolated_wtsp(self, *args, **kwargs):
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weight_row(self, i):\n return self.weights[i]", "def get_spec_weight(self, i, j):\n return self.weights[i][j]", "def update_weight(wij, yj, tj, xi, lr = 0.25):\n\n new_wij = wij - lr * ((yj - tj) * xi)\n new_wij = round(new_wij, 3)\n #print(\"\\t\", wij, \"-\", lr, \"* (\", yj, \"-\", tj, \") *\", xi, \"=\", new_wij)\n\n return new_wij", "def GetParameters_and_Weight_of_CalSensor(ind, similar_sensors): \n v, a, h = similar_sensors.loc[ind]['Vert_Shift'], similar_sensors.loc[ind]['Amplitude'], similar_sensors.loc[ind]['Horiz_Shift']\n por, res, drain = similar_sensors.loc[ind]['Porosity'], similar_sensors.loc[ind]['Res_SM'], similar_sensors.loc[ind]['Drainage']\n n, w = similar_sensors.loc[ind]['n'], similar_sensors.loc[ind]['Weight']\n return v,a,h,por,res,drain,n,w", "def get_metric(self, data_row: pd.Series) -> float:", "def getWeightValue(self, index):\r\n\t\treturn None", "def FIGrowth1(self, p, x, y=None, C=None, sumsq=True, weights=None):\n Fzero, Ibreak, F1amp, F2amp, Irate = p\n yd = numpy.zeros(x.shape)\n m1 = (x < Ibreak)\n m2 = (x >= Ibreak)\n yd[m1] = Fzero + x[m1] * F1amp / Ibreak\n maxyd = numpy.max(yd)\n yd[m2] = F2amp * (1.0 - numpy.exp(- (x[m2] - Ibreak) * Irate)) + maxyd\n if y is None:\n return yd\n else:\n dy = y - yd\n w = numpy.ones(len(x))\n# xp = numpy.argwhere(x>0)\n# w[xp] = w[xp] + 3.*x[xp]/numpy.max(x)\n if sumsq is True:\n ss = numpy.sqrt(numpy.sum((w * dy) ** 2.0))\n return ss\n else:\n return w * dy", "def weight_to_line(w):\n b = -(w[0] / w[2])\n m = -(w[1] / w[2])\n \n return b, m", "def get(self, index):\n assert isinstance(index, np.ndarray)\n return self.weight[index]", "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n 
self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def __getitem__( self, line ):\n\n # check if the line is a string and if yes convert it to an index\n if isinstance( line, str ):\n line = find_line( self._line_info, line )\n if line < 0:\n raise ValueError(\"The desired spectral window could not be found!\")\n \n return self._sji_data[line]", "def return_weight(self, startVertex: np.int, endVertex:np.int):\n return self.__mat[startVertex][endVertex]", "def WLS(store):\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].sample()", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def get_param_sample_weight(self, name):\n if name == 'negbin_r_0':\n weights = np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_r_1':\n weights = np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_M_0':\n weights = np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_M_1':\n weights = np.asarray(self.model.p_outlier_allele[:, 1])\n elif name == 'negbin_hdel_mu':\n weights = self._get_hdel_weights()\n elif name == 'negbin_hdel_r_0':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_hdel_r_1':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_loh_p':\n weights = self._get_loh_weights()\n elif name == 'betabin_loh_M_0':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_loh_M_1':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 1])\n norm = weights.sum()\n if norm > 0.:\n return weights / norm\n else:\n print ('nothing for ' + name)\n return None", "def rms_smooth(self, i, sp_mw):\n mw = int(sp_mw*self.s_freq) # convert moving window size from seconds to samples\n \n # convolve for rolling RMS\n datsq = np.power(self.spfiltEEG[i], 2)\n window = np.ones(mw)/float(mw)\n # convolution mode 'valid' will remove edge effects, but also introduce a time shift\n # and downstream erors because it changes the length of the rms data\n rms = np.sqrt(np.convolve(datsq, window, 'same')) \n #spinfilt_RMS = pd.DataFrame(rms, 
index=self.data.index) --> add this back for > speed\n self.spRMS[i] = rms # for > speed, don't store spinfilt_RMS[i] as an attribute\n \n # smooth with moving average\n rms_avg = self.spRMS[i].rolling(mw, center=True).mean()\n self.spRMSmavg[i] = rms_avg", "def _get_weight(self, weight):\n if isinstance(weight, pd.DataFrame):\n pass\n elif isinstance(weight, str):\n if weight.endswith(\"gctx\"):\n from cmapPy.pandasGEXpress.parse import parse\n weight = parse(weight).data_df\n elif weight.endswith(\"csv\"):\n weight = pd.read_csv(weight)\n else:\n weight = pd.read_table(weight)\n else:\n raise Exception(\"Unsupported file format\")\n\n assert weight.shape[1] == 979 # first column is offset\n\n return weight", "def get_wavelet_radiant(row, wavelet_df):\n\twavelet_row = wavelet_df[wavelet_df.solar == row['solar']//1]\n\tif len(wavelet_row.index):\n\t\trow['radiant_ll0'] = wavelet_row.ll0.values[0]\n\t\trow['radiant_beta'] = wavelet_row.beta.values[0]\n\t\treturn row\n\telse:\n\t\tpeak_row = wavelet_df.iloc[wavelet_df.xsig.idxmax()]\n\t\trow['radiant_ll0'] = peak_row.ll0\n\t\trow['radiant_beta'] = peak_row.beta\n\t\treturn row", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def _get_interpolation(self) :\n \n return self._interpolation", "def interp_weights(xyz, uvw):\r\n\t\r\n\t## Dimension of data\r\n\td = xyz.shape[-1]\r\n\t\r\n\t## Delaunay triangulation of random points\r\n\ttri = sp.spatial.Delaunay(xyz)\r\n\t\r\n\t## Find indices of triangles approximating regular grid\r\n\t## -1 if grid point is outside tesselated region (N)\r\n\tsimplex = tri.find_simplex(uvw)\r\n\t\r\n\t## Grid-triangles' vertex indices (N,3)\r\n\t## tri.simplices returns indices of all triangles in tesselation\r\n\tvertices = np.take(tri.simplices, simplex, axis=0)\r\n\t\r\n\t## The transformed coordinates of grid-triangles\r\n\t## 
tri.transform.shape = (xyz.shape[0], 3, d)\r\n\t## temp.shape = (uvw.shape[0], 3, d)\r\n\t## The last vertex-coordinate for each row temp[:,-1,:]=(0,0) roughly\r\n\ttemp = np.take(tri.transform, simplex, axis=0)\r\n\r\n\t## Gridpoint (uvw) coordinates caluclated from triangulation-coordinate zeros\r\n\tdelta = uvw - temp[:, d, :]\r\n\t\r\n\t## For each gridpoint in uvw, dot coordinate-offset delta=(dx,dy) with\r\n\t## the coordinate of the corresponding triangle's vertices temp[:d]=((v1x,v1y),(v2x,v2y))\r\n\t## Result is ((v1x*dx+v1y*dy),(v2x*dx+v2y*dy)) for each row (each gridpoint) in uvw\r\n\t## bary.shape = (uvw.shape[0], d)\r\n\tbary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)\r\n\t\r\n\t## Return (i) indices of triangles corresponding to uvw gridpoints;\r\n\t## (ii)\r\n\treturn vertices, np.hstack([bary, 1 - bary.sum(axis=1, keepdims=True)])", "def assemble_hyper_surface(self):\n def get_row_no(k0):\n hdf = pd.HDFStore(self.ewlibpath, 'r')\n hdf0 = hdf.get(k0)\n idx = np.where((np.abs(hdf0.th_wavelength-self.wavelength)<=0.025)\n & (np.abs(hdf0.th_EP - self.ep)<=0.02)\n & (hdf0.element == self.element))[0]\n if idx.size!=0:\n idx = idx[0]\n else:\n idx = -1\n hdf.close()\n return idx\n\n if self.interpolated:\n raise NotImplementedError(\"Interpolated model doesn't have such method!\")\n\n row_no = get_row_no(self._keys[0])\n if row_no == -1:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n return self._hyper_surface\n else:\n f = h5py.File(self.ewlibpath, 'r')\n if self.cal == \"nlte\":\n ews = [np.array(f[k+\"/table\"])[row_no][1][3] for k in self._keys]\n else:\n ews = [np.array(f[k+\"/table\"])[row_no][1][2] for k in self._keys]\n f.close()\n\n\n datapoints = np.concatenate((np.array(self._atmos_pars), np.transpose([ews])), axis=1)\n datapoints = datapoints[~np.isnan(datapoints).any(axis=1)]\n\n if datapoints.shape[0] <= 3:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n del datapoints\n return self._hyper_surface\n else:\n self._hyper_surface = datapoints\n print(\"Grid is prepared!\")\n del datapoints\n return self._hyper_surface", "def sample(self, gdf, wdw=0):\n # TODO: add method for line geometries\n if not np.all(gdf.geometry.type == \"Point\"):\n raise ValueError(\"Only point geometries accepted\")\n\n if gdf.crs is not None and self.crs is not None and gdf.crs != self.crs:\n gdf = gdf.to_crs(self.crs)\n\n pnts = gdf.geometry\n r, c = self.rowcol(pnts.x.values, pnts.y.values, mask_outside=True, nodata=-1)\n if wdw > 0:\n ar_wdw = np.arange(-wdw, wdw + 1)\n rwdw = np.add.outer(r, np.repeat(ar_wdw, ar_wdw.size))\n cwdw = np.add.outer(c, np.tile(ar_wdw, ar_wdw.size))\n nrow, ncol = self.shape\n mask = np.logical_or(\n np.logical_or(rwdw < 0, rwdw >= nrow),\n np.logical_or(cwdw < 0, cwdw >= ncol),\n )\n rwdw[mask] = -1\n cwdw[mask] = -1\n ds_sel = xr.Dataset(\n {\n \"index\": xr.IndexVariable(\"index\", gdf.index.values),\n \"mask\": xr.Variable((\"index\", \"wdw\"), ~mask),\n self.x_dim: xr.Variable((\"index\", \"wdw\"), cwdw),\n self.y_dim: xr.Variable((\"index\", \"wdw\"), rwdw),\n }\n )\n else:\n ds_sel = xr.Dataset(\n {\n \"index\": xr.IndexVariable(\"index\", gdf.index.values),\n \"mask\": xr.Variable(\"index\", np.logical_and(r != -1, c != -1)),\n self.x_dim: xr.Variable(\"index\", c),\n self.y_dim: xr.Variable(\"index\", r),\n }\n )\n obj_out = self._obj.isel(ds_sel[[self.y_dim, self.x_dim]])\n if np.any(~ds_sel[\"mask\"]): # mask out of domain points\n obj_out = 
obj_out.raster.mask(ds_sel[\"mask\"])\n return obj_out", "def getDoubleRow(self, int: int) -> typing.List[float]:\n ...", "def convert_rows_to_wv(direct_file, grism_file, rows):\n\n # Collect data from FITS headers\n with fits.open(grism_file) as hdu:\n hdr = hdu[0].header\n hdr1 = hdu[1].header\n sci_postarg_1 = hdr['POSTARG1']\n sci_postarg_2 = hdr['POSTARG2']\n sci_crpix_1 = hdr1['CRPIX1'] # this isn't a real keyword...\n sci_crpix_2 = hdr1['CRPIX2'] \n\n with fits.open(direct_file) as hdu:\n hdr = hdu[0].header\n hdr1 = hdu[1].header\n data = hdu[1].data\n cal_postarg_1 = hdr['POSTARG1']\n cal_postarg_2 = hdr['POSTARG2']\n cal_crpix_1 = hdr1['CRPIX1']\n cal_crpix_2 = hdr1['CRPIX2']\n\n\n # Find the central source\n mean, med, std = sigma_clipped_stats(data, sigma=3.0, iters=5)\n sources = daofind(data-med, fwhm=3.0, threshold=5.*std)\n \n source = sources[np.where(sources['flux'] == np.max(sources['flux']))]\n x_cen, y_cen = source['xcentroid'], source['ycentroid']\n\n\n # Calculate the offset\n x_offset = sci_crpix_1 - cal_crpix_1 + (sci_postarg_1 - cal_postarg_1)/0.135\n y_offset = sci_crpix_2 - cal_crpix_2 + (sci_postarg_2 - cal_postarg_2)/0.121\n\n pos_x, pos_y = x_cen + x_offset, y_cen + y_offset\n\n constants_0 = [8.95E3, 9.35925E-2, 0.0, 0.0, 0.0, 0.0]\n constants_1 = [4.51423E1, 3.17239E-4, 2.17055E-3, -7.42504E-7, 3.4863E-7, 3.09213E-7]\n\n coords_0 = constants_0[0] + constants_0[1]*pos_x + constants_0[2]*pos_y\n coords_1 = constants_1[0] + constants_1[1]*pos_x + constants_1[2]*pos_y + constants_1[3]*pos_x**2 + constants_1[4]*pos_x*pos_y + constants_1[5]*pos_y**2\n \n wv = coords_0 + coords_1*(rows-pos_x) + pos_y\n\n return wv", "def get_weights(self, var_id: int, batch_no: int) -> ndarray:\n pass", "def getWeight(self) -> float:\n ...", "def interpolate_w(w_2d_full):\n w_2d_half = np.zeros_like(w_2d_full)\n\n w_2d_half[:, 0] = w_2d_full[:, 0] / 2.\n\n for idx in range(1, np.shape(w_2d_half)[1], 1):\n w_2d_half[:,idx] = 0.5 * (w_2d_full[:, idx] + w_2d_full[:, idx-1])\n\n return w_2d_half" ]
[ "0.6086211", "0.54288447", "0.5354366", "0.534022", "0.5280534", "0.52703285", "0.52215284", "0.5198095", "0.5197607", "0.5134318", "0.51240146", "0.5096904", "0.50929785", "0.5077011", "0.50710326", "0.501282", "0.49915385", "0.49869552", "0.49808988", "0.49808666", "0.4946103", "0.49375433", "0.49311683", "0.49139974", "0.49103057", "0.49071363", "0.48922622", "0.48876464", "0.48786542", "0.48740563" ]
0.6111145
0
Returns True if the column exists in the table
def _column_exists(self, tbname, colname):
    self._check_file(tbname)
    tb = tbtool()
    tb.open(tbname)
    cols = tb.colnames()
    tb.close()
    return (colname in cols)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def column_exists(self, column_name):\n return column_name in self.columns", "def tableHasColumn(self, schema, table, column):\r\n res = self.fetchSqlRecords(\r\n \"select count(*) from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}' and c.column_name='{}'\".format(schema, table, column))\r\n return res[0][0] > 0", "def check_column(self, column_name, table, verbose=True): \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n \n CHECK_COLUMN_COMMAND = \"SHOW COLUMNS FROM {0} LIKE '{1}'\".format(table, column_name)\n \n self.cursor.execute(CHECK_COLUMN_COMMAND)\n \n exists=False\n for row in self.cursor:\n exists = True\n break\n \n if verbose and exists: print(\"Column with label '{0}' found in table '{1}'\".format(column_name, table))\n elif verbose: print(\"Column with label '{0}' not found in table '{1}'\".format(column_name, table)) \n \n return exists", "def has_column(self, column_name):\n return column_name in self._columns", "def has_column(self, column_name):\n return column_name in self._columns", "def has_column(self, column):\n if column == '*':\n return True\n for c in self.columns:\n if column == c.data.name:\n return True\n return False", "def check_column(self, table_name: str, column_name: str) -> bool:\n try:\n insp = reflection.Inspector.from_engine(self.engine)\n for col in insp.get_columns(table_name):\n if column_name in col[\"name\"]:\n return True\n return False\n except Exception as err:\n logger.error(\"check_column [error] -> %s\" % err)\n return False", "def table_has_column(table: str, column: str) -> bool:\n config = op.get_context().config\n engine = engine_from_config(\n config.get_section(config.config_ini_section), prefix=\"sqlalchemy.\"\n )\n insp = reflection.Inspector.from_engine(engine)\n try:\n return any(col[\"name\"] == column for col in insp.get_columns(table))\n except NoSuchTableError:\n return False", "def check_if_column_exists(cursor,table_schema,table_name,column_name):\n query = \"SELECT * FROM information_schema.columns WHERE table_schema = '\" + table_schema + \"' AND table_name = '\" + table_name + \"' AND column_name = '\" + column_name.lower() + \"';\"\n cursor.execute(query)\n rows = cursor.fetchall()\n if len(rows) == 1:\n return(rows)", "def _check_columns_with_table(table: Table, columns: Sequence[str]) -> Optional[bool]:\n for column in columns:\n if column not in table.c.keys():\n raise TypeError(f\"Specified column {column} did not exist on table {table}\")\n return True", "def check_column(self, columns):\n for i in columns:\n if i.name == self.name:\n raise ColumnNameAlreadyInTableException(f'Column \"{self.name}\" is already in the table!')\n return True", "def __contains__(self, column):\n if isinstance(column, orb.Column):\n return self.__model == column.schema().model() and self.__column == column.name()\n else:\n return column == self.__column", "def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n [column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def is_specific_column_present(self, grid_div_id, column_name):\n specific_column_locator = (By.XPATH, \"//div[contains(@id, 
'%s')]/descendant::th[@data-field='%s']\" % (grid_div_id, column_name))\n return self.is_element_present(specific_column_locator)", "def is_expected_grid_column_present(self, expected_column_name):\n grid_column_locator = (By.XPATH, \"//th[@data-title='%s']\" %(expected_column_name))\n return self.is_element_present(grid_column_locator)", "def check_for_column(self, column_name):\n if column_name not in self.data.columns:\n raise RuntimeError(\"Source {} has no '{}' column\".format(\n self.name, column_name))", "def __contains__(self, column):\n for query in self.__queries:\n if column in query:\n return True\n return False", "def __contains__(self, item):\r\n if isinstance(item, six.string_types):\r\n return item in self.table._columns\r\n else:\r\n return item in self", "def has_attribute(self, name):\n return name in self.schema", "def contains_col(self, col_name):\n fmt_name = ColNameFormatter.fmt(col_name)\n col_in_solar = fmt_name in self.__solar_cols\n col_in_wind = fmt_name in self.__wind_cols\n return col_in_solar or col_in_wind", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def ensure_column_exists(df, col_name, col_alt = False):\n if col_name not in df.columns:\n renamed = False\n if col_alt:\n for dta in col_alt:\n if dta in df.columns:\n df.rename(columns={dta:col_name}, inplace = True)\n renamed = True\n else:\n pass\n if not renamed:\n txt = 'Column %s not found.'%col_name\n if col_alt:\n txt += 'Neither one of the alternatives: %s'(col_alt)\n raise AttributeError(txt)\n return", "def is_specific_column_on_vendor_profile_grid_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, 'divCustomerDialedDigit')]/descendant::a[text()='%s']\" % column_name)\n return self.is_element_present(column_locator)", "def check_df_col(df, column, name=None):\n if column is not None:\n\n if type(column) != list:\n\n column = [column]\n\n for col in column:\n if name is None:\n error_message = f\"The value '{col}' is not present in any of the columns of your DataFrame.\"\n else:\n error_message = f\"Your {name} value '{col}' is not present in any of the columns of your DataFrame.\"\n error_message += \"\\nYou may be looking for:\\n \" + str(list(df.columns))\n\n assert col in df.columns, error_message", "def check_foreign_key_exists(self, table_name, column_name, referenced_table, referenced_column):\n ans = self.execute(self.commands.foreign_key_exists(self.db.name, table_name, column_name, referenced_table, referenced_column))\n if not ans:\n return False\n return True", "def field_exists(table, field):\n fieldList = [f.name for f in arcpy.ListFields(table)]\n return True if field in fieldList else False", "def is_target_buy_policies_grid_column_present(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='%s']\" % (self.target_buy_policies_grid_div_id, column_name))\n return self.is_element_present(column_locator)", "def has_table(self, table):\n return table in self.get_table_list(\".\" in table)", "def is_specific_column_present_in_workflow_tab(self, column_name):\n column_locator = (By.XPATH, \"//div[contains(@id, 'divOutboundWorkFlowGrid_')]/descendant::div[@class='k-grid-header']/descendant::th[@data-title='%s']\" % column_name)\n return self.is_element_present(column_locator)", "def exists(self, table, cursor):\n cursor.execute(f\"SELECT name FROM sqlite_master WHERE 
type='table' AND name='{table}'\")\n res = cursor.fetchone()\n return True if res else False" ]
[ "0.83644086", "0.8292885", "0.82175887", "0.81018114", "0.81018114", "0.8073049", "0.8035705", "0.7999258", "0.78273046", "0.7082933", "0.70003563", "0.6905545", "0.68490213", "0.6835957", "0.6805615", "0.6759569", "0.67382336", "0.6727789", "0.6726648", "0.6660846", "0.6653446", "0.66067356", "0.65654725", "0.6529032", "0.64969236", "0.64914966", "0.6485003", "0.6476651", "0.6431042", "0.64175576" ]
0.833071
1
Generates a polynomial array of length nchan. The polynomial coefficients should be given in ascending order, i.e., when coeff = [1.0, 2.0, 3.0] elements of the return array will be polyarr[ichan] = 1.0 + 2.0*ichan + 3.0*ichan**2 (ichan = 0 ~ nchan-1)
def _generate_poly_array(self, nchan, coeff=[]):
    if nchan < 0:
        raise ValueError, "nchan should be >=0"
    if len(coeff)==0:
        if nchan ==0:
            return []
        else:
            raise ValueError, "No valid coefficient given."
    polyarr = numpy.zeros(nchan)
    for iorder in range(len(coeff)):
        polyarr += coeff[iorder]*numpy.array(xrange(nchan))**iorder
    return polyarr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_ploynomial_array(self, coeff, x):\n xarr = numpy.array(x)\n yarr = numpy.zeros(len(xarr))\n for idim in range(len(coeff)):\n ai = coeff[idim]\n yarr += ai*xarr**idim\n return yarr", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def cheb_poly(x, n):\n if n == 0:\n return anp.array([1 for i in x])\n elif n == 1:\n return x\n else:\n return 2*x*cheb_poly(x, n-1)-cheb_poly(x, n-2)\n\n raise NotImplementedError(\"Problem 6 Incomplete\")", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)", "def causal_poly(n, N):\n # Check input parameters\n assert np.all(np.array(n, dtype=int) == n), 'causal_poly: n should be an integer or an array of integers.'\n assert int(N) == N, 'causalpol: The provided polynomial degree is not an integer.'\n assert N >= 1, 'causalpol: The polynomial degree should be bigger than 0.'\n N = int(N)\n # Construct plynomial array where each row corresponds to an n value and each column corresponds to an N value\n A = np.zeros((len(n), N))\n A[:][n>=0] = np.linspace(1, N, N)\n A.T[:, n>=0] += n[n>=0]\n # Calculate product over rows and normalize\n return np.prod(A, axis=1) / np.math.factorial(N)", "def build_poly(tx, degree) :\n shape = tx.shape\n poly = np.zeros((shape[0], shape[1] * degree))\n poly[:,:shape[1]] = tx\n for deg in range(2, degree + 1) :\n for j in range(0, shape[1]) :\n poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg\n return poly", "def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients", "def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial", "def get_poly_cc(n, k, t):\n assert (n > 0 and k >= 0), \"order and derivative must be positive.\"\n\n cc = np.ones(n)\n D = np.linspace(0, n-1, n)\n\n for i in range(n):\n for j in range(k):\n cc[i] = cc[i] * D[i]\n D[i] = D[i] - 1\n if D[i] == -1:\n D[i] = 0\n\n for i, c in enumerate(cc):\n cc[i] = c * np.power(t, D[i])\n\n return cc", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 
0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def poly2cheby(cin):\n # 2009-07-07 09:41 IJC: Created\n from scipy.special import poly1d, chebyt\n \n cin = poly1d(cin)\n cout = []\n\n ord = cin.order\n for ii in range(ord+1):\n chebyii = chebyt(ord-ii)\n cout.append(cin.coeffs[0]/chebyii.coeffs[0])\n cin -= chebyii*cout[ii]\n\n return cout", "def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T", "def causal_poly(t, N):\n # Check input parameters\n assert int(N) == N, 'causalpol: The provided polynomial degree is not an integer.'\n assert N >= 1, 'causalpol: The polynomial degree should be bigger than 0.'\n N = int(N)\n # Construct plynomial array where each row corresponds to a t value and each column corresponds to an N value\n A = np.zeros((len(t), N))\n A[:][t>=0] = np.linspace(1, N, N)\n A.T[:, t>=0] += t[t>=0]\n # Calculate product over rows and normalize\n return np.prod(A, axis=1) / np.math.factorial(N)", "def cheby2poly(cin):\n # 2009-10-22 22:19 IJC: Created\n from scipy.special import poly1d, chebyt\n \n cin = poly1d(cin)\n cout = poly1d(0)\n\n ord = cin.order\n for ii in range(ord+1):\n cout += chebyt(ii)*cin[ii]\n\n return cout", "def nc_coeffs(poly, var, max_deg=10, order='increasing'):\r\n\r\n # TODO: elegant way to find out the degree\r\n # workarround: pass the maximum expected degree as kwarg\r\n\r\n D0 = sp.Dummy('D0')\r\n poly = poly.expand() + D0 # ensure class add\r\n\r\n assert isinstance(poly, sp.Add)\r\n res = []\r\n # special case: 0-th power of var\r\n coeff = 0\r\n for a in poly.args:\r\n if not a.has(var):\r\n coeff += a\r\n res.append(coeff.subs(D0, 0))\r\n\r\n # special case: first power of var\r\n coeff = poly.diff(var).subs(var, 0)\r\n res.append(coeff)\r\n\r\n # powers > 1:\r\n for i in xrange(1, max_deg):\r\n coeff = 0\r\n for a in poly.args:\r\n if a.has(var**(i + 1)):\r\n term = a.subs(var, 1)\r\n coeff += term\r\n res.append(coeff)\r\n\r\n if order == \"decreasing\":\r\n res.reverse()\r\n\r\n return res", "def polynomial_basis(theta: np.array, degree: int) -> np.array:\n\n # Minimum degree is 1\n if degree < 1:\n raise Exception(\"Degree has to be 1 or greater!\")\n\n basis = np.empty((degree, theta.size), dtype=np.float)\n basis[0,] = np.ones((1, theta.size))\n\n for row in range(1, degree):\n basis[row,] = theta\n\n for row in range(2, degree):\n basis[row,] *= basis[row - 1,]\n\n return basis", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def bezierPoly(ctrlP):\n n = len(ctrlP) - 1 #degree of the polynomial\n first = True\n for t in np.linspace(0.0, 1.0, 5 * n):\n point = bezierFunc(ctrlP, t)\n if first: # Initialize list of points in the polynomial\n bezierPointsList = np.copy(point)\n first 
= False\n else:\n bezierPointsList = np.append(bezierPointsList, point, axis=0)\n return bezierPointsList", "def make_coefficients(r, a, num_terms):\n\n\tnum_vars = 4\n\tcoeffs = np.zeros((num_vars, num_terms))\n\tfor i in range(num_vars):\n\t\tcoeffs[i, i+1] = r[i]\n\tcoeffs[0, [5, 6, 7, 8]] = a[0]\n\tcoeffs[1, [6, 9, 10, 11]] = a[1]\n\tcoeffs[2, [7, 10, 12, 13]] = a[2]\n\tcoeffs[3, [8, 11, 13, 14]] = a[3]\n\t\n\treturn coeffs.ravel()", "def construct_poly(data, power):\n return np.power(data, power)", "def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]", "def conway_polynomial(n=100):\n \n xs = []\n ys = []\n for i in range(n):\n x = (i - n/2) / 50\n xs.append(x)\n\n y = x**71 - x**69 - 2*x**68 - x**67 + 2*x**66 + 2*x**65 + x**64 - x**63 - x**62 - x**61 - x**60 - x**59 + 2*x**58 + 5*x**57 + 3*x**56 +\\\n - 2*x**55 - 10*x**54 - 3*x**53 - 2*x**52 + 6*x**51 + 6*x**50 + x**49 + 9*x**48 - 3*x**47 - 7*x**46 - 8*x**45 - 8*x**44 + 10*x**43 +\\\n + 6*x**42 + 8*x**41 - 5*x**40 - 12*x**39 + 7*x**38 - 7*x**37 + 7*x**36 + x**35 - 3*x**34 + 10*x**33 + x**32 - 6*x**31 - 2*x**30 +\\\n - 10*x**29 - 3*x**28 + 2*x**27 + 9*x**26 - 3*x**25 + 14*x**24 - 8*x**23 - 7*x**21 + 9*x**20 + 3*x**19 - 4*x**18 - 10*x**17 - 7*x**16 +\\\n + 12*x**15 + 7*x**14 + 2*x**13 - 12*x**12 - 4*x**11 - 2*x**10 + 5*x**9 + x**7 - 7*x**6 + 7*x**5 - 4*x**4 + 12*x**3 - 6*x**2 + 3*x - 6\n\n ys.append(y)\n\n return xs, ys", "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def poly_int(params: PolyParams, x: NDArray, order: int) -> NDArray:\n\n return np.polyval(np.polyint(params, -order), x)", "def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, a3, a4, a6]", "def polynomial_matrix(order):\n \n matrix = np.identity(order)\n for i in range(order-1):\n matrix[i,i+1] = 1\n return matrix", "def gen_rand_poly(deg_lower_limit = 1, deg_upper_limit = 10, coeff_limit = 10):\n deg = 
random.randint(deg_lower_limit,deg_upper_limit)\n coeffs = [random.randint(-coeff_limit, coeff_limit) for _ in range(deg+1)]\n\n # Never have 0 as leading coefficient\n if coeffs[deg] == 0:\n coeffs[deg] = 1\n\n def term(coeff, d):\n if coeff == 0:\n return ''\n elif d == 0:\n return (' + ' if coeff>0 else ' - ') + str(abs(coeff))\n elif d == 1:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x' if abs(coeff)!=1 else 'x')\n elif d == deg:\n return ('' if coeff>0 else '-') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n else:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n\n terms = [term(coeffs[d], d) for d in range(deg+1)]\n return deg, coeffs, ''.join([terms[d]for d in range(deg,-1,-1)]).strip('+ ')" ]
[ "0.68056685", "0.6573853", "0.6233045", "0.6194051", "0.6190378", "0.6133814", "0.61265975", "0.60940784", "0.6003684", "0.59296507", "0.5928541", "0.59085226", "0.5857575", "0.5854614", "0.5853316", "0.58489704", "0.584802", "0.5847538", "0.5844446", "0.58300024", "0.579835", "0.5771155", "0.5720685", "0.5713615", "0.56999284", "0.5688833", "0.56581616", "0.56114656", "0.5591464", "0.55582553" ]
0.9005255
0
Compares two arrays and returns True if they are within a tolerance. Checks shapes first.
def _compare_arrays(self, data, reference, atol=1.e-5, rtol=1.e-5):
    if not (data.shape==reference.shape):
        return False
    ret=numpy.allclose(data,reference, atol=atol, rtol=rtol)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def array_equal(a, b, unit_tol=1e-4, total_tol=1e-4, with_sign=True):\n\n a = to_nparray(a)\n b = to_nparray(b)\n\n if len(a) == 0 and len(b) == 0:\n return True\n\n if not with_sign:\n a, b = np.abs(a), np.abs(b)\n res = (np.sum(np.abs(a - b) > unit_tol)) / a.size < total_tol\n return res", "def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True", "def equals(a, b, tol=1e-10):\n return np.abs(a-b) <= tol", "def _compare_vector(arr1, arr2):\n\n length = len(arr1)\n if len(arr2) != length:\n return False\n\n for i in range(length):\n element_1 = float(arr1[i])\n element_2 = float(arr2[i])\n\n\n diff = abs(abs(element_1) - abs(element_2))\n if diff != 0.0:\n rel = diff / min(abs(element_1), abs(element_2))\n \n # For a basis set, a relatively coarse comparison\n # should be acceptible\n if rel > 1.0e-10:\n return False\n\n return True", "def checkArray(comment, first, second, dtype, tol=1e-10, update=True):\n res = True\n if len(first) != len(second):\n res = False\n print(\"checking answer\",comment,'|','lengths do not match:',len(first),len(second))\n else:\n for i in range(len(first)):\n if dtype == float:\n pres = checkFloat('',first[i],second[i],tol,update=False)\n elif dtype in (str,unicode):\n pres = checkSame('',first[i],second[i],update=False)\n if not pres:\n print('checking array',comment,'|','entry \"{}\" does not match: {} != {}'.format(i,first[i],second[i]))\n res = False\n if update:\n if res:\n results[\"pass\"] += 1\n else:\n results[\"fail\"] += 1\n return res", "def _raise_assert_on_np_is_close_all(self, np0, np1):\r\n\r\n return self.assertTrue(np.isclose(np0, np1).all())", "def assert_equal_shapes(numpy_arrays: list):\n\n if len(numpy_arrays) < 2:\n return\n\n shapes = np.asarray([np.shape(_arr) for _arr in numpy_arrays]).astype(float)\n mean = np.mean(shapes, axis=0)\n for i in range(len(shapes)):\n shapes[i, :] = shapes[i, :] - mean\n\n if not np.sum(np.abs(shapes)) <= 1e-5:\n raise AssertionError(\"The given volumes did not all have the same\"\n \" dimensions. Please double check the simulation\"\n f\" parameters. 
Called from {inspect.stack()[1].function}\")", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def _numpy_checker(x, y):\r\n x, y = x[0], y[0]\r\n if (x.dtype != y.dtype or x.shape != y.shape\r\n or numpy.any(numpy.abs(x - y) > 1e-10)):\r\n raise Exception(\"Output mismatch.\", {'performlinker': x, 'clinker': y})", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def within_tolerance(a_vec, b_vec, tol_vec):\n\tfor a, b, tol in zip(a_vec, b_vec, tol_vec):\n\t\tif abs(a - b) > tol:\n\t\t\treturn False\n\treturn True", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", "def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def compare_area(test, expected):\r\n \r\n # ensure test and expected are both lists and of the same length\r\n if not isinstance(test, list) or not isinstance(expected, list):\r\n raise ValueError(\"Colour arrays are not lists.\")\r\n \r\n if len(test) != len(expected):\r\n raise ValueError(\"Colour arrays are not same length.\")\r\n \r\n # set boolean, then compare each RGB value and if threshold is exceeded\r\n # change bool from False to True\r\n is_different = False\r\n \r\n for test_value, expected_value in zip(test, expected):\r\n if abs(test_value - expected_value) > s.IMAGE_THRESHOLD:\r\n is_different = True\r\n \r\n return is_different", "def contains(self, points, abs_tol=ABS_TOL):\n test = self.A.dot(points) - self.b[:, np.newaxis] < abs_tol\n return np.all(test, axis=0)", "def eq(m1, m2, tol):\n if m1.ndim == 2 and m2.ndim == 2:\n m = abs(m1 - m2)\n\n if np.amax(m) < tol:\n return True\n else:\n return False\n elif m1.ndim == 2:\n msz = np.shape(m2)[0]\n tmat1 = m1.reshape((1, 9)) \n tmat2 = np.tile(tmat1, (msz, 1))\n tmat3 = tmat2.reshape(msz, 3, 3)\n\n m = abs(tmat3 - m2)\n max1 = np.amax(np.amax(m, axis=1), axis=1) < tol\n if np.any(max1):\n return True\n else:\n return False\n\n elif m2.ndim == 2:\n msz = np.shape(m1)[0]\n tmat1 = m2.reshape(msz, (1, 9))\n tmat2 = np.tile(tmat1, (msz, 1))\n tmat3 = tmat2.reshape(msz, 3, 3)\n\n m = abs(m1 - tmat3)\n max1 = np.amax(np.amax(m, axis=1), axis=1) < tol\n if np.any(max1):\n return True\n else:\n return False\n else:\n if np.shape(m1)[0] == np.shape(m2)[0]:\n m = abs(m1 - m2)\n max1 = np.amax(np.amax(m, axis=1), axis=1) < tol\n return np.where(max1)\n else:\n raise Exception('Wrong Input Types')", "def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def shapeCompare(*args, **kwargs)->int:\n pass", "def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def np_comparison_check(h2o_data, np_data, num_elements):\n # Check for numpy\n try:\n imp.find_module('numpy')\n except ImportError:\n assert False, \"failed comparison check because unable to import numpy\"\n\n import numpy as np\n rows, cols = h2o_data.dim\n for i in range(num_elements):\n r = random.randint(0,rows-1)\n c = random.randint(0,cols-1)\n h2o_val = 
h2o_data[r,c]\n np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]\n if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(\n assert np.absolute(h2o_val - np_val) < 1e-5, \\\n \"failed comparison check! h2o computed {0} and numpy computed {1}\".format(h2o_val, np_val)", "def compare_coordinates(a: tuple, b: tuple) -> bool:\n return all(np.array(a) < np.array(b))", "def is_array_dominated(array_0, array_1):\n for val_0, val_1 in zip(sorted(array_0), sorted(array_1)):\n if val_0 >= val_1:\n return False\n return True", "def np_comparison_check(h2o_data, np_data, num_elements):\n # Check for numpy\n try:\n imp.find_module('numpy')\n except ImportError:\n assert False, \"failed comparison check because unable to import numpy\"\n\n import numpy as np\n rows, cols = h2o_data.dim\n for i in range(num_elements):\n r = random.randint(0,rows-1)\n c = random.randint(0,cols-1)\n h2o_val = h2o_data[r,c] if isinstance(h2o_data,H2OFrame) else h2o_data[r]\n np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]\n if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(\n assert np.absolute(h2o_val - np_val) < 1e-6, \\\n \"failed comparison check! h2o computed {0} and numpy computed {1}\".format(h2o_val, np_val)", "def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))", "def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def assert_compare(x, y, atol=1e-5, method='ALL'):\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True", "def _assert_all_close_according_to_type(self, a, b):\n if a.dtype == np.float32:\n np.testing.assert_allclose(a, b, rtol=1e-6, atol=1e-6)\n elif a.dtype == np.float64:\n np.testing.assert_allclose(a, b, rtol=1e-15, atol=1e-15)\n else:\n assert False" ]
[ "0.7049362", "0.6753018", "0.6666613", "0.6650952", "0.66391", "0.65610904", "0.6543794", "0.65330875", "0.641461", "0.63840634", "0.63636976", "0.63067174", "0.6305565", "0.62804353", "0.627131", "0.6226086", "0.62086785", "0.620447", "0.6167091", "0.6160811", "0.61569476", "0.6146051", "0.61450195", "0.61446", "0.61390156", "0.6128323", "0.6115164", "0.61105186", "0.6074377", "0.60721225" ]
0.7189442
0
Convert an interpolation string to a list of interpolations in time (which should be defined) and frequency (default is 'linear'). E.g., 'linear,cspline' > ['linear', 'cspline']; 'nearest' > ['nearest', 'linear' (using the default)]
def interpolation_to_list(self, interpolation): interplist = interpolation.split(',') if len(interplist) == 0: interplist = ['linear', 'linear'] elif len(interplist) == 1: interplist += ['linear'] return interplist[0:2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolateCubicPeriodic() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th parameter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolatePeriodicSpline(keytime, y)\n S.append(interpolants)\n return S", "def parse_times(time_str):\n warnings = []\n days, interval = time_str.split(',')\n assert int(days) == float(days)\n days = int(days)\n assert int(interval) == float(interval)\n interval = int(interval)\n if interval < 3:\n warnings.append('Minimum interval is 3 hours')\n if days > 14:\n warnings.append('Maximum spot forecast period is 14 days')\n hours = np.arange(days * 24 + 1)[::interval]\n return hours.tolist(), warnings", "def tickStrings(values, scale, spacing):\n # sending a list of values in format \"HH:MM:SS.SS\" generated from Total seconds.\n return [(int2dt(value).strftime(\"%H:%M:%S.%f\"))[:-4] for value in values]", "def parse_float_list(string):\n new_list = []\n convert_fun = int\n for num in string[1:-1].split(';'):\n if '/' in num:\n num = float(Fraction(num))\n convert_fun = float\n elif ',' in num or '.' in num:\n num = float(num.replace(',', '.'))\n convert_fun = float\n elif num == \"inf\":\n convert_fun = float\n new_list.append(num)\n return [convert_fun(x) for x in new_list]", "def interpolateCubicNatural() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th paramter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolateSpline(keytime, y)\n S.append(interpolants)\n return S", "def interpolate_to_frequency(a, freq_llimit, freq_ulimit):\n a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (freq_llimit, freq_ulimit))", "def transform_string_feature_range_into_list(text):\n values = []\n for part in text.split(\",\"):\n if part.strip() == \"\":\n continue\n if \"-\" in part:\n start, end = part.split(\"-\")[:2]\n values.extend(list(range(int(start), int(end) + 1)))\n else:\n values.append(int(part))\n return values", "def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n\n features_by_offsets = {\n offsets.YearEnd: [],\n offsets.QuarterEnd: [MonthOfYear],\n offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n offsets.Second: [\n SecondOfMinute,\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature_classes in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return [cls() for cls in feature_classes]\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n The following frequencies are supported:\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n S - secondly\n \"\"\"\n raise RuntimeError(supported_freq_msg)", "def people_speed_interp():\n\n return f\"\"\"<enumeratedValueSet variable=\"Slow\"> <value value=\"%s\"/> </enumeratedValueSet>\n <enumeratedValueSet variable=\"Medium\"> <value value=\"%s\"/> </enumeratedValueSet>\n <enumeratedValueSet variable=\"Fast\"> <value value=\"%s\"/> </enumeratedValueSet>\n \"\"\"", "def tickStrings(self, values, scale, spacing):\n ret = []\n if not values:\n return []\n\n if 
spacing >= 31622400: # 366 days\n fmt = \"%Y\"\n\n elif spacing >= 2678400: # 31 days\n fmt = \"%Y %b\"\n\n elif spacing >= 86400: # = 1 day\n fmt = \"%b/%d\"\n\n elif spacing >= 3600: # 1 h\n fmt = \"%b/%d-%Hh\"\n\n elif spacing >= 60: # 1 m\n fmt = \"%H:%M\"\n\n elif spacing >= 1: # 1s\n fmt = \"%H:%M:%S\"\n\n else:\n # less than 2s (show microseconds)\n # fmt = '%S.%f\"'\n fmt = '[+%fms]' # explicitly relative to last second\n\n for x in values:\n try:\n t = datetime.fromtimestamp(x)\n ret.append(t.strftime(fmt))\n except ValueError: # Windows can't handle dates before 1970\n ret.append('')\n\n return ret", "def parser(string: str, token: str) -> List[float]:\n search_token = re.compile(r\"{token}: (.*?){unit}\".format(token=token,\n unit=UNIT))\n output = re.findall(search_token, string)\n if len(output) == 0:\n return []\n\n return [float(i) for i in output]", "def process_time_string(timestr):\n timestr = timestr.strip()\n toks = timestr.split('+')\n timeslices = []\n for t in toks:\n tm = t.strip()\n mobj = re.search('\\\\*', tm)\n if mobj == None:\n timeslices += [int(tm)]\n else:\n tms = tm.split('*')\n timeslices += int(tms[0]) * [int(tms[1])]\n\n return timeslices", "def string_to_numeric_list(alist):\n l = None\n try:\n l = [ float(i) for i in alist ]\n except ValueError:\n pass\n else:\n return l\n # try with ',' as a comma separator\n try:\n l = [ float(i.replace(',', '.')) for i in alist ]\n except ValueError:\n raise ValueError, \"Invalid literal for float\"\n else:\n return l", "def time_features_from_frequency_str(cls, freq_str: str) -> List[str]:\n\n features_by_offsets = {\n offsets.YearBegin: [],\n offsets.YearEnd: [],\n offsets.MonthBegin: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n ],\n offsets.MonthEnd: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n ],\n offsets.Week: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\",\n ],\n offsets.Day: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n ],\n offsets.BusinessDay: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n ],\n offsets.Hour: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n \"Hour\",\n ],\n offsets.Minute: [\n \"Month\",\n \"Quarter\",\n \"Is_quarter_end\",\n \"Is_quarter_start\",\n \"Is_year_end\",\n \"Is_year_start\",\n \"Is_month_start\",\n \"Week\" \"Day\",\n \"Dayofweek\",\n \"Dayofyear\",\n \"Hour\",\n \"Minute\",\n ],\n }\n\n offset = to_offset(freq_str)\n\n for offset_type, feature in features_by_offsets.items():\n if isinstance(offset, offset_type):\n return feature\n\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n\n The following frequencies are supported:\n\n Y, YS - yearly\n alias: A\n M, MS - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n \"\"\"\n raise RuntimeError(supported_freq_msg)", "def ranges_to_list(s):\n r = []\n for p in s.split(\",\"):\n p = p.strip()\n 
try:\n r += [int(p)]\n continue\n except:\n pass\n match = rx_range.match(p)\n if not match:\n raise SyntaxError\n f, t = [int(x) for x in match.groups()]\n if f >= t:\n raise SyntaxError\n for i in range(f, t + 1):\n r += [i]\n return sorted(r)", "def process_arg(x):\n if x.lower().startswith('set:'):\n return x[x.find(':')+1:].split(',')\n elif x.lower().startswith('range:'):\n sss = x[x.find(':')+1:].split(',')\n if len(sss) == 2:\n start, stop = map(float, sss[:2])\n step = 1\n if len(sss) == 3:\n start, stop, step = map(float, sss)\n return [str(intify(x)) for x in frange(start, stop, step)]\n else:\n return [x]", "def guess_series(input_string):\n float_finder = re.compile(\n r\"(nan|[-+]?inf|[-+]?[0-9]*\\.?[0-9]+(?:e[-+]?[0-9]+)?)\", re.I\n )\n return [\n i\n for i in [\n _convert_to_float(j)\n for j in float_finder.findall(input_string)\n # Remove entires we couldn't convert to a sensible value.\n ]\n if i is not None\n ]", "def get_input_from_string(str_input):\n\tfrom string import split\n\tres = []\n\tlist_input = split(str_input)\n\tfor i in xrange(len(list_input)):\n\t\tres.append(float(list_input[i]))\n\treturn res", "def get_point_list(self, string):\n a = re.findall('\\(\\d+\\.\\d+, \\d+\\.\\d+\\)', string)\n lst = []\n for tp in a:\n lst.append(self.get_tuple(tp))\n print lst", "def getListfromConfig(string, types='int'):\n if types == 'int':\n vals = [int(i) for i in string.split(',')]\n return vals", "def _intervals(parts, duration) -> list:\n part_duration = duration / parts\n return [(floor(i * part_duration), floor((i + 1) * part_duration)) for i in range(parts)]", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def interpolate_replacements(text, expr_replacements):\n if not expr_replacements:\n return [text]\n\n replacement_strings = list(expr_replacements.keys())\n splitter = re.compile(\n \"({0})\".format(\"|\".join(re.escape(r) for r in replacement_strings))\n )\n split_text = [p for p in splitter.split(text) if p]\n return [expr_replacements.get(t, t) for t in split_text]", "def conf_load_par_linspace(par_def):\n s,e,l = par_def[:-1].split(':')\n try:\n s = float(s)\n e = float(e)\n l = str_to_positive_float(l)\n except ValueError, e:\n raise ValueError(\n \"Excpected float1:float2:positive_floatl for the linspace definition. 
{}\".format(e)\n )\n par_list = list(np.linspace(s,e,l))\n if len(par_list) == 0:\n raise ValueError(\"No parameter values generated.\")\n return par_list", "def get_twist_from_string(value_string):\n\n\tvalue_array = value_string.split(\",\")\n\n\tif not len(value_array) == 6:\n\t\traise ValueError(\"Incorrect number of elements in line\")\n\n\t# Might throw exception - intentional!\n\tvalue_array = [int(x) for x in value_array]\n\n\tnew_twist = Twist()\n\n\tnew_twist.linear.x = value_array[0]\n\tnew_twist.linear.y = value_array[1]\n\tnew_twist.linear.z = value_array[2]\n\tnew_twist.angular.x = value_array[3]\n\tnew_twist.angular.y = value_array[4]\n\tnew_twist.angular.z = value_array[5]\n\n\treturn new_twist", "def str_map(values, formatting):\n return [formatting % v for v in values]", "def get_lp(s):\n sl = [] \n for stock in s.symbols: \n #creates a list of latest stock prices\n quote = get(stock,\"LON\")\n #changes string to integer and removes ','\n x = (quote.replace(',',''))\n x = float(x)\n sl.append(x)\n return sl", "def parse_replacements(args):\n line = \" \".join(args.terms).strip()\n return [term.split(':') for term in line.split(',') if term]", "def getConversion(body):\n values = []\n msg_template_f = u\"* {:,.2f} Schmeckles → **${:,.2f} USD**\\n\" # with decimals\n msg_template_i = u\"* {:,.0f} Schmeckles → **${:,.0f} USD**\\n\" # without decimals\n msg_inf = u\"* There's a lot of numbers there, I think you could probably go to Wall Street.\\n\\n*You ever hear about Wall Street, Morty? Y-Y-Y'know what those guys do i-in-in their fancy boardrooms? They take their balls and they dip 'em in cocaine and wipe 'em all over each other—y'know.*\\n\"\n pairs = p.findall(body)\n if len(pairs) > 0:\n for match in pairs:\n # '<number> schmeckle' -> match, float(<number>)\n value_str = match.split()[0]\n\n # Handle numbers with over 9000 characters. Yes, it's over 9000.\n if (len(value_str)) > 9000:\n values.append(locale.atof('inf'))\n else:\n values.append(locale.atof(value_str))\n \n response = []\n for schmeckle in values:\n if isinf(schmeckle):\n response.append(msg_inf)\n else:\n usd = schmeckle2usd(schmeckle)\n if schmeckle.is_integer():\n response.append(msg_template_i.format(schmeckle, usd))\n else:\n response.append(msg_template_f.format(schmeckle, usd))\n \n return [response, values]", "def s2l(sents,i,f,freq):\n return [str(l) for _,_,l in sents[i]]" ]
[ "0.56173795", "0.5576247", "0.55391693", "0.53756726", "0.5346058", "0.50850666", "0.5063842", "0.5045507", "0.49662524", "0.49306107", "0.49246454", "0.49028924", "0.48939764", "0.48712966", "0.48580346", "0.4848678", "0.48370945", "0.48291197", "0.4823621", "0.48156068", "0.48134556", "0.4801245", "0.47936934", "0.47695273", "0.47394887", "0.47352543", "0.47345948", "0.47220463", "0.472064", "0.46982446" ]
0.74464566
0
Array comparison. Duplicate reference for pol if necessary, i.e., if cell.shape==reference.shape, this method compares cell and reference directly; if cell.shape!=reference.shape (e.g., cell.shape=[npol, nchan] while reference.shape=[nchan]), the same reference is compared against each polarization of cell.
def _testCell(self, cell, reference, atol=1.e-5, rtol=1.e-5): cellarr = numpy.array(cell) refarr = numpy.array(reference) if cellarr.ndim != refarr.ndim: # pol loop for ipol in range(cellarr.shape[0]): testarr = cellarr[ipol] self._testCell(testarr, refarr) else: if self.verbose and refarr.size < 130: print("Reference = %s" % str(refarr)) print("Data = %s" % str(cellarr)) self.assertEqual(cellarr.shape,refarr.shape) self.assertTrue(self._compare_arrays(cellarr, refarr, rtol=rtol, atol=atol))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compare_arrays(self, data, reference, atol=1.e-5, rtol=1.e-5):\n if not (data.shape==reference.shape): return False\n ret=numpy.allclose(data,reference, atol=atol, rtol=rtol)\n return ret", "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def __eq__(self, other: JaggedArray) -> bool:\n\n return np.array_equal(self.data, other.data) and np.array_equal(\n self.shape, other.shape\n )", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def test_by_ref_non_contiguous(self):\n self.init()\n corners = self.ff64_2[::2,::2]\n assert not corners.flags['OWNDATA']\n set_to_zero_by_ref(corners)\n assert np.all(self.ff64_2 == np.array([[0,1,0],[3,4,5],[0,7,0]]))", "def CompareNPY(ref_npy, ref_icc, dec_npy, dec_icc, threshold=0.):\n ref = numpy.load(ref_npy)\n dec = numpy.load(dec_npy)\n if ref.shape != dec.shape:\n raise ConformanceTestError('Expected shape %s but found %s' %\n (ref.shape, dec.shape))\n # TODO(deymo): Implement this comparison.", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array", "def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def _match_dims(poly1, poly2, copy=None):\r\n if copy is None:\r\n copy = True\r\n\r\n if copy:\r\n p1 = deepcopy(poly1)\r\n p2 = deepcopy(poly2)\r\n else:\r\n p1 = poly1\r\n p2 = poly2\r\n\r\n dim1 = poly1.multi_index.spatial_dimension\r\n dim2 = poly2.multi_index.spatial_dimension\r\n if dim1 >= dim2:\r\n poly2.expand_dim(dim1)\r\n else:\r\n poly1.expand_dim(dim2)\r\n return poly1, poly2", "def sameGrid(A, B):\n if np.isscalar(A):\n # A is a scalar \n if not np.isscalar(B):\n return False\n else:\n return (A == B)\n else:\n # Assume two GriddedBasis objects \n if A.nd != B.nd or A.ng != B.ng:\n return False \n elif not np.all(A.gridpts == B.gridpts):\n return False \n else:\n return True", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", "def __NDim_restriction_correct_ndarray_ndarray(self):\n\n strTestName = 'The number of dimensions in a Numpy array equals the number of dimensions in another Numpy array (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('m4RefParameter1', 'Numpy array reference parameter')\n RxCSObject.paramType('m4RefParameter1', np.ndarray)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimHE('parameter1', 'm4RefParameter1', add=1)\n\n RxCSObject.m4RefParameter1 = np.random.rand(4, 2, 9)\n RxCSObject.parameter1 = np.random.rand(2, 1, 9, 
5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_by_ref_no_copy(self):\n self.init()\n # Test with something that really shouldn't be copied.\n ptr = get_memptr(self.ff64_1)\n assert ptr == get_memptr(self.ff64_1)\n # Test with things I really hope won't be copied. (C-contiguous vector [should be same], F-contiguous matrix)\n ptr1 = get_memptr(self.f64_1)\n ptr2 = get_memptr(self.ff64_2)\n assert ptr1 == get_memptr(self.f64_1)\n assert ptr2 == get_memptr(self.ff64_2)\n # Test with things that should be copied. (C-contiguous matrix)\n ptr = get_memptr(self.f64_2)\n assert ptr != get_memptr(self.f64_2)", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def point_to_same_memory(a, b):\n return a.data == b.data", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def __eq__(self,rkm):\n K1=np.vstack([self.A,self.b])\n K2=np.vstack([rkm.A,rkm.b])\n if K1.shape!=K2.shape:\n return False\n else:\n return (np.vstack([self.A,self.b])==np.vstack([rkm.A,rkm.b])).all()", "def _point_equal(a,b):\n return np.array_equal(a._Point__loc, b._Point__loc)", "def similar(self, other):\r\n if self.rows == other.rows and self.columns == other.columns:\r\n return True\r\n else:\r\n return False", "def __eq__(self, other):\n return other and self.cells == other.cells", "def _compare_arrays(self, old_arr, new_arr):\n inters = min(len(old_arr), len(new_arr)) # this is the smaller length\n\n result = {\n u\"+++\": {},\n u\"---\": {},\n }\n\n for idx in range(inters):\n res = self._compare_elements(old_arr[idx], new_arr[idx])\n if res is not None:\n result[idx] = res\n\n # the rest of the larger array\n if (inters == len(old_arr)):\n for idx in range(inters, len(new_arr)):\n result[idx] = {u'+++': new_arr[idx]}\n else:\n for idx in range(inters, len(old_arr)):\n result[idx] = {u'---': old_arr[idx]}\n\n # Clear out unused keys in result\n out_result = {}\n for key in result:\n if len(result[key]) > 0:\n out_result[key] = result[key]\n\n return self._filter_results(result)", "def array_equal_to(obj):\n return ArrayIsEqual(obj)", "def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]", "def _datacopied(arr, original):\n if arr is original:\n return False\n if 
not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def __eq__(self, other):\n if self.get_dimensions() == other.get_dimensions():\n is_equal = (np.allclose(self.lon_arr, other.lon_arr) and\n np.allclose(self.lat_arr, other.lat_arr))\n else:\n is_equal = False\n return is_equal" ]
[ "0.65500224", "0.5869078", "0.5757813", "0.53636265", "0.5318079", "0.530859", "0.52684", "0.52240014", "0.5221366", "0.5194449", "0.51814866", "0.51687014", "0.5168295", "0.51641154", "0.515756", "0.51557255", "0.5147352", "0.5133151", "0.51277983", "0.5117408", "0.5087433", "0.50810087", "0.50730526", "0.5069566", "0.5047614", "0.5045582", "0.50446784", "0.50411415", "0.5037515", "0.50200033" ]
0.6374898
1
Returns an array of 1./in_arr^2. This corresponds to WEIGHT_SPECTRUM by 1./Tsys^2 in case the input is a Tsys spectrum.
def tsysweightsp_from_tsysarr(self, in_arr): return 1./(numpy.array(in_arr)**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight_from_meantsys(self, in_arr):\n return 1./(numpy.mean(in_arr)**2)", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def spectrum(self):\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return spectrum", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def ms_reg(self):\n return np.squeeze(self._ms_reg)", "def _get_power_of_spectrum(U):\n \n _, n_samples, n_frames, n_frequencies = U.shape\n del _\n\n P = np.zeros([n_samples, n_samples, n_frequencies], dtype=np.complex64)\n P = np.sum(np.abs(U)**2, axis=2) / n_frames\n \n return P", "def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n diff = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def get_freq_array(bandwidth, n_chans):\n return numpy.arange(n_chans)*float(bandwidth)/n_chans", "def get_wavelengths(system, info=False):\n\n system_data = system.SystemData\n wavelengths = system_data.Wavelengths\n N_wavelengths = wavelengths.NumberOfWavelengths\n\n if info is True:\n print(\"\\nReading Wavelengths\")\n print(\"Total Number of Wavelengths: %d\" % N_wavelengths)\n\n wavelength_array = np.zeros(N_wavelengths)\n\n for k in np.arange(1, N_wavelengths + 1):\n _wave = wavelengths.GetWavelength(k).Wavelength\n wavelength_array[k - 1] = _wave\n\n if info is True:\n print(\"%.5f microns\" % _wave)\n\n return wavelength_array", "def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n bfspec = self.dedisperse(dmbin)[tbin].mean(axis=0).real\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def _get_split_spectrum(T,WK):\n\n n_samples, n_frames, n_frequencies = T.shape\n U = np.zeros([n_samples, n_samples, n_frames, n_frequencies], dtype=np.complex64)\n \n for l in range(n_frequencies):\n for n in range(n_samples):\n _T = np.zeros([n_samples, n_frames], dtype=np.complex64)\n _T[n,:] = T[n,:,l]\n inv_WK = np.linalg.inv(WK[:,:,l])\n U[n,:,:,l] = np.dot(inv_WK, _T)\n \n return U", "def tsz_spectrum(self, nu):\n x = NU_SCALE * nu # Frequency/temperature\n #g_nu = ( x*(np.exp(x) + 1.) / (np.exp(x) - 1.) ) - 4. # tSZ spectral dependence\n g_nu = x**2. * np.exp(x) * (x/np.tanh(x/2.) - 4.) 
/ (np.exp(x) - 1.)**2.\n return g_nu", "def return_power_spectrum(self, freq_signal, time_signal):\n\n\t\tfreq_signal **= 2 \n\t\tlen_fts = len(freq_signal)\n\t\tlen_signal = len(signal)\n\n\t\tif len_signal % 2:\n\t\t\tfreq_signal[1:len_fts] *= 2\n\n\t\telse:\n\t\t\tfreq_signal[1:len_fts-1] *= 2\n\n\t\treturn freq_signal", "def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )", "def process(self, data):\n spectr = stft(data, n_fft=512, hop_length=160)\n return np.concatenate((spectr.real[:, :, np.newaxis], spectr.imag[:, :, np.newaxis]), axis=2)", "def normalise(self, spectrum):\n\n return spectrum", "def normalise(self, spectrum):\n\n return spectrum", "def fnutofwave(warr, farr):\n c= 2.99792458e18 #spped of light in Angstroms/s\n return farr*c/warr**2", "def _compute_bare_spectrum_constant(self):\n eigendata = []\n for subsys in self._hilbertspace:\n if subsys not in self.subsys_update_list:\n evals_count = subsys.truncated_dim\n eigendata.append(subsys.eigensys(evals_count=evals_count))\n else:\n eigendata.append(None)\n return eigendata", "def spectrum_multi_taper(self):\r\n if np.iscomplexobj(self.input.data):\r\n psd_len = self.input.shape[-1] \r\n dt = complex\r\n else:\r\n psd_len = self.input.shape[-1] / 2 + 1\r\n dt = float\r\n\r\n #Initialize the output\r\n spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),\r\n dtype=dt)\r\n\r\n #If multi-channel data:\r\n if len(self.input.data.shape) > 1:\r\n for i in range(self.input.data.shape[0]):\r\n # 'f' are the center frequencies of the frequency bands\r\n # represented in the MT psd. 
These are identical in each\r\n # iteration of the loop, so they get reassigned into the same\r\n # variable in each iteration:\r\n f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(\r\n self.input.data[i],\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n else:\r\n f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n\r\n return f, spectrum_multi_taper", "def spectrum(self):\r\n spectrum = tsa.cache_to_psd(self.cache, self.ij)\r\n\r\n return spectrum", "def sigmasp_from_weightsp(self, in_arr):\n return 1./numpy.sqrt(numpy.array(in_arr))", "def ss_reg(self):\n return np.squeeze(self._ss_reg)", "def _uniform_freqs(Nd, xp=np):\n if xp.isscalar(Nd):\n Nd = (Nd,)\n ndim = len(Nd)\n fs = [2 * np.pi * xp.arange(Nd[d]) / Nd[d] for d in range(ndim)]\n fs = xp.meshgrid(*fs, indexing=\"ij\")\n fs = [f.reshape((-1, 1), order=\"F\") for f in fs]\n return xp.hstack(fs)", "def spect(self):\n return 1", "def calculateenergy_timedomain(input_signal_or_spectrum):\n if isinstance(input_signal_or_spectrum, (sumpf.Spectrum)):\n ip = sumpf.modules.InverseFourierTransform(spectrum=input_signal_or_spectrum).GetSignal()\n else:\n ip = input_signal_or_spectrum\n energy_allchannels = []\n for c in ip.GetChannels():\n energy_singlechannel = []\n for s in c:\n energy_singlechannel.append(abs(s) ** 2)\n energy_allchannels.append(numpy.sum(energy_singlechannel))\n return energy_allchannels", "def get_spectrum(self):\n return self.spectrum.copy()", "def DFTpower2(time, signal, freqs):\n \n powerSpectrum = np.zeros(len(freqs))\n\n for i, freq in enumerate(freqs):\n arg = 2.0 * np.pi * freq * time\n powerSpectrum[i] = np.sum(signal * np.cos(arg))**2 + np.sum(signal * np.sin(arg))**2\n\n powerSpectrum = powerSpectrum * 4.0 / len(time)**2\n return(powerSpectrum)", "def get_clean_spectrum(time_arr, f_arr, sigma_arr, freq_arr):\n\n iteration = 10\n gain = 1.0\n\n window = get_window_function(time_arr, freq_arr)\n\n residual_arr = copy.deepcopy(f_arr)\n\n pspec, tau, aa, bb, cc = get_ls(time_arr, residual_arr,\n sigma_arr, freq_arr)\n\n aa_list = []\n bb_list = []\n cc_list = []\n tau_list = []\n omega_list = []\n\n for it in range(1, iteration+1):\n max_dex = np.argmax(pspec)\n freq_max = freq_arr[max_dex]\n tau_max = tau[max_dex]\n aa_max = aa[max_dex]*gain\n bb_max = bb[max_dex]*gain\n cc_max = cc[max_dex]*gain\n\n aa_list.append(aa_max)\n bb_list.append(bb_max)\n cc_list.append(cc_max)\n tau_list.append(tau_max)\n omega_list.append(freq_max)\n\n model = aa_max*np.cos(freq_max*(time_arr-tau_max))\n model += bb_max*np.sin(freq_max*(time_arr-tau_max))\n model += cc_max\n\n residual_arr -= model\n if it<iteration:\n pspec, tau, aa, bb, cc = get_ls(time_arr, residual_arr,\n sigma_arr, freq_arr)\n\n return (np.array(aa_list), np.array(bb_list),\n np.array(cc_list), np.array(omega_list),\n np.array(tau_list))", "def IFourierSeries(input):\n N=len(input);\n w=2*cmath.pi/N;\n k=numpy.arange(0,N); \n output = [complex(0)] * N \n for n in range(N): \n r=input*cexp(-1j*w*n*k);\n output[n]=np.mean(r);\n\n print output.__class__ \n return output;" ]
[ "0.62197316", "0.5731945", "0.561864", "0.55723643", "0.5519251", "0.54838157", "0.5463326", "0.5450829", "0.5421233", "0.5408736", "0.53334206", "0.5332715", "0.52982306", "0.528176", "0.5272986", "0.5269713", "0.5269713", "0.52356917", "0.52283984", "0.5218861", "0.5218008", "0.5206645", "0.5203087", "0.5202483", "0.51798964", "0.5173057", "0.51724714", "0.5129171", "0.5119544", "0.51080793" ]
0.65089893
0
Returns 1./mean(in_arr)^2. This corresponds to WEIGHT by 1./Tsys^2 in case WEIGHT_SPECTRUM does not exist.
def weight_from_meantsys(self, in_arr): return 1./(numpy.mean(in_arr)**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsysweightsp_from_tsysarr(self, in_arr):\n return 1./(numpy.array(in_arr)**2)", "def normalizing_constant(self):\n\t\tdim = self.train_data.shape[1]\n\t\treturn 1 / (2 * np.pi * ((self.bandwidth) ** 2)) ** (dim / 2)", "def wo_mean(arr):\n\n return np.array(arr) - np.mean(arr, axis=0)", "def sigmasp_from_weightsp(self, in_arr):\n return 1./numpy.sqrt(numpy.array(in_arr))", "def search_intensification(self):\n self.stage = 1\n return np.mean(self.MTM[:,:-1], axis=0, \n keepdims=True).T", "def fnutofwave(warr, farr):\n c= 2.99792458e18 #spped of light in Angstroms/s\n return farr*c/warr**2", "def har_mean(array):\n return ((sum([1/x for x in array]))**(-1))*len(array)", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def w_mean(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.w\n\n return runtime_error_to_optional(self, _retrieve)", "def modspec(self, elem):\r\n image = elem.reshape(self.stimshape)\r\n fourier = np.fft.rfft2(image)\r\n mid = int(fourier.shape[0]/2)\r\n power = np.abs(fourier)**2\r\n avgmag = np.array([(power[ii] + power[-ii])/2 for ii in range(mid)])\r\n return avgmag", "def specmod(self, tbin, bgwindow=4):\n\n diff = self.tracksub(tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def w(self) -> float:\n return self.A[0] if self.scalar_vector else self.A[3]", "def spm_wnorm(A):\n\n A = A + 1e-16\n\n norm = np.divide(1.0, np.sum(A, axis=0))\n\n avg = np.divide(1.0, A)\n\n wA = norm - avg\n\n return wA", "def w0_mean(self) -> Optional[float]:\n\n def _retrieve(fm: VariationalFM) -> float:\n return fm.w0\n\n return runtime_error_to_optional(self, _retrieve)", "def spectral_flatness(data, fft_data):\n spec = np.abs(fft_data)\n spec_mean = np.mean(spec)\n spec_gmean = scipy.stats.gmean(spec)\n if spec_mean == 0:\n return 1\n return spec_gmean/spec_mean", "def normalise(array,tot=1.0):\r\n tot1 = np.sum(np.abs(array)**2)\r\n if tot1 == 0.0 :\r\n print 'bg.normalise : warning sum array = 0'\r\n arrayout = np.copy(array)\r\n else :\r\n arrayout = array * np.sqrt(tot / tot1)\r\n return arrayout", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def intrinsic_impedance(self,freq):\n if freq == 0:\n return cmath.sqrt(self.mu/self.eps)\n else:\n gamma = self.propagation_constant(freq)\n w = 2*np.pi*freq\n return 1j*w*self.mu/gamma", "def get_norm_factor(self, arr):\r\n\r\n\t\tif self.func == \"sigmoid\":\r\n\t\t\tnorm_factor = sigmoid(arr, *self.popt) \r\n\t\telif self.func == \"constant\":\r\n\t\t\tnorm_factor = arr*0 + self.popt\t#ensures that output is the same size as arr\r\n\t\treturn(norm_factor)", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n bfspec = self.dedisperse(dmbin)[tbin].mean(axis=0).real\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def spect(self):\n return 1", "def mean(arr) -> float:\n return sum(arr) / len(arr)", "def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod 
lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n diff = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm", "def spectral_brightness(data, fft_data):\n spec = np.abs(fft_data)\n weight_vec = np.log(np.linspace(1, 100, len(fft_data)))\n weight_vec = np.pi*weight_vec/weight_vec[-1]\n weight = np.cos(weight_vec)/2 + 0.5\n low_spec_sum = sum(spec*weight)\n high_spec_sum = sum(spec*(1-weight))\n if low_spec_sum == 0:\n return 1 # attention: if signal is a sine at fs/2 this is also hit\n return high_spec_sum/low_spec_sum", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def confint(arr):\n res=[[],[],[]]\n #r=hpd(arr)\n r=(sap(arr,2.5),sap(arr,97.5))\n res[0]=r[0]\n res[1]=arr.mean(0)\n res[2]=r[1]\n return np.array(res)", "def __weights(self):\n r, c = np.mgrid[:self.size, :self.size] + 0.5\n rad = np.sqrt((r - self.size/2)**2 + (c - self.size/2)**2)\n img = np.zeros((self.size, self.size))\n rmin = np.sqrt(2) * 0.5 * self.damp * rad.max()\n rmax = np.sqrt(2) * 0.5 * rad.max()\n zone = np.logical_and(rad > rmin, rad < rmax)\n img[rad < rmin] = 1.0\n img[rad > rmax] = 0.0\n img[zone] = (rmax - rad[zone]) / (rmax - rmin)\n return img", "def weight_from_weightsp(self, in_arr, takeEvenMean=False):\n return self._median(numpy.array(in_arr), takeEvenMean)" ]
[ "0.62368584", "0.5754579", "0.57493305", "0.5643843", "0.56121606", "0.56033355", "0.55805826", "0.5527867", "0.55130255", "0.54666394", "0.54532003", "0.54035026", "0.5393957", "0.53300005", "0.5329847", "0.5320016", "0.53127503", "0.52882606", "0.52683395", "0.52547145", "0.5229139", "0.52182657", "0.52134454", "0.52110875", "0.51974684", "0.51883304", "0.5152285", "0.5150205", "0.51268065", "0.5114445" ]
0.7573409
0
Returns the median value of an array. If takeEvenMean, the middle two values are averaged when the number of elements in in_arr is even. If not, in_arr is sorted in ascending order and the (n-1)/2-th element is returned.
def _median(self, in_arr, takeEvenMean): if takeEvenMean: return numpy.median(in_arr) else: return numpy.sort(in_arr, axis=None)[(in_arr.size-1)/2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def median(array):\n sorted = [x for x in array]\n sorted.sort()\n middle = len(sorted)/2 #Gets the middle element, if present\n if len(sorted) % 2 == 0: #Even, so need to average together the middle two values\n return float((sorted[middle]+sorted[middle-1]))/2\n else:\n return sorted[middle]", "def amedianscore (inarray,dimension=None):\r\n if dimension == None:\r\n inarray = N.ravel(inarray)\r\n dimension = 0\r\n inarray = N.sort(inarray,dimension)\r\n if inarray.shape[dimension] % 2 == 0: # if even number of elements\r\n indx = inarray.shape[dimension]/2 # integer division correct\r\n median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0\r\n else:\r\n indx = inarray.shape[dimension] / 2 # integer division correct\r\n median = N.take(inarray,[indx],dimension)\r\n if median.shape == (1,):\r\n median = median[0]\r\n return median", "def arrMedian(array, length):\r\n\tif length == 1:\r\n\t\tarray_median = list[0]\r\n\t\treturn array_median\r\n\telif length not in odd_numbers(length): \r\n\t\t# Floor division with two to access the second relevant number for the median, from which we can add the first one and divide to obtain the median. \r\n\t\tmedian_floor = length // 2\r\n\t\tarr_median = (array[median_floor-1] + array[median_floor])/2 \r\n\t\treturn arr_median\r\n\telif length in odd_numbers(length):\r\n\t\t# Setting up a loop for the program to recognize when it has reached the middle (median) number, and returning it. \r\n\t\tfor i in range(length):\r\n\t\t\tif i + 1 == length - i: \r\n\t\t\t\tmedian = array[i] \r\n\t\t\t\treturn median", "def median(arr):\n indices = []\n\n list_size = len(arr)\n median = 0\n if list_size % 2 == 0:\n indices.append(int(list_size / 2) - 1) # -1 because index starts from 0\n indices.append(int(list_size / 2))\n median = (arr[indices[0]] + arr[indices[1]]) / 2\n else:\n indices.append(int(list_size / 2))\n median = arr[indices[0]]\n\n return median, indices", "def median(a, axis=0, out=None, overwrite_input=False):\n if overwrite_input:\n if axis is None:\n sorted = a.ravel()\n sorted.sort()\n else:\n a.sort(axis=axis)\n sorted = a\n else:\n sorted = sort(a, axis=axis)\n if axis is None:\n axis = 0\n indexer = [slice(None)] * sorted.ndim\n index = int(sorted.shape[axis]/2)\n if sorted.shape[axis] % 2 == 1:\n # index with slice to allow mean (below) to work\n indexer[axis] = slice(index, index+1)\n else:\n indexer[axis] = slice(index-1, index+1)\n # Use mean in odd and even case to coerce data type\n # and check, use out array.\n return mean(sorted[indexer], axis=axis, out=out)", "def compute_median(a):\n if len(a) > 0:\n if len(a) % 2 == 0:\n return math.trunc(math.ceil((a[len(a) / 2 - 1] + a[len(a) / 2]) / 2.0))\n else:\n return a[len(a) / 2]\n else:\n return None", "def calc_median(numbers):\n middle_index = len(numbers) // 2\n return sorted(numbers[middle_index]) # sorted returns the numbers sorted without changing", "def median(v):\n n = len(v)\n sorted_v = sorted(v)\n midpoint = n // 2\n\n if n % 2 == 1:\n # if odd, return th emiddle value\n return sorted_v[midpoint]\n else:\n # if even, return the average of the middle values\n lo = midpoint -1\n hi = midpoint\n return (sorted_v[lo] + sorted_v[hi]) / 2", "def find_median(values):\n midpoint = int(len(values) / 2)\n if len(values) % 2 == 0:\n median = (values[midpoint - 1] + values[midpoint]) / 2\n else:\n median = values[midpoint]\n return median", "def _median_even(xs: List[float]) -> float:\n sorted_xs = sorted(xs)\n hi_midpoint = len(xs) // 2 # e.g. 
length 4 => hi_midpoint 2\n return (sorted_xs[hi_midpoint - 1] + sorted_xs[hi_midpoint]) / 2", "def median(nums):\r\n count = len(nums)\r\n mid = count // 2\r\n if count % 2:\r\n return nums[mid]\r\n else:\r\n return (nums[mid - 1] + nums[mid]) / 2", "def amedian (inarray,numbins=1000):\r\n inarray = N.ravel(inarray)\r\n (hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])\r\n cumhist = N.cumsum(hist) # make cumulative histogram\r\n otherbins = N.greater_equal(cumhist,len(inarray)/2.0)\r\n otherbins = list(otherbins) # list of 0/1s, 1s start at median bin\r\n cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score\r\n LRL = smallest + binsize*cfbin # get lower read limit of that bin\r\n cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin\r\n freq = hist[cfbin] # frequency IN the 50%ile bin\r\n median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN\r\n return median", "def find_median(values):\n\n midpoint = int(len(values) / 2)\n if len(values) % 2 == 0:\n median = (values[midpoint - 1] + values[midpoint]) / 2\n else:\n median = values[midpoint]\n\n return median", "def find_median(input):\n return find_order(input, len(input)//2)", "def median(data):\n data = sorted(data)\n data_len = len(data)\n if data_len == 0:\n raise StatisticsError('no median for empty data')\n if data_len % 2 == 1:\n return data[data_len // 2]\n if data_len % 2 == 0:\n i = data_len // 2\n return (data[i - 1] + data[i]) / 2", "def get_median(numlist):\n return np.median(numlist)", "def median(nums):\n #sort in ascending order\n nums.sort()\n\n if len(nums) % 2 == 0: #if list has even length\n elem1 = nums[(len(nums) // 2) - 1]\n elem2 = nums[len(nums) // 2]\n return (elem1 + elem2) / 2\n else:\n return nums[len(nums) // 2]", "def median(values):\n # Write the median() function\n midpoint = int(len(values) / 2)\n if len(values) % 2 == 0:\n median = (values[midpoint - 1] + values[midpoint]) / 2\n else:\n median = values[midpoint]\n return median", "def median(data_sorted):\n length = len(data_sorted)\n\n if length % 2 == 1:\n return data_sorted[((length + 1) // 2) - 1]\n\n half = length // 2\n a = data_sorted[half - 1]\n b = data_sorted[half]\n\n return (a + b) / 2", "def median(median_numbers):\n sorted_numbers = sorted(median_numbers)\n length = len(sorted_numbers)\n\n if len(median_numbers) % 2: # uneven numbers of integers\n return sorted_numbers[length / 2]\n\n return (sorted_numbers[length / 2] + sorted_numbers[length / 2 - 1]) / 2.0", "def median(self):\r\n\t\t_sorted = sorted(self.sample)\r\n\t\t_len\t= len(self.sample)\r\n\t\treturn _sorted[(_len - 1)//2] if _len % 2 == 1 else (_sorted[(_len - 1)//2] + _sorted[(_len//2)])/2", "def quick_median(array):\n length = len(array)\n if length < 2:\n return array\n pivot_index = Quick.median_of_three(array, 0, length-1)\n pivot = array.pop(pivot_index)\n above = []\n below = []\n for item in array:\n if item > pivot:\n above.append(item)\n else:\n below.append(item)\n return Quick.quick_median(below)+[pivot]+Quick.quick_median(above)", "def get_median(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n from math import floor\n # Sort the data\n sorted_data = sorted(list(data))\n n = len(sorted_data)\n # get the middle index\n odd_middle_index = floor(n / 2)\n upper_even_index = floor(n / 2)\n lower_even_index = floor(n / 2) - 1\n # print(f\"\\nodd_middle = {odd_middle_index}\")\n # print(f\"upper_even_middle = {upper_even_index}\")\n # print(f\"lower_even_middle = 
{lower_even_index}\")\n if n % 2 == 1:\n return float(sorted_data[odd_middle_index])\n # If n is even, gets the average of the middle two values\n else:\n median_lower = sorted_data[lower_even_index]\n median_upper = sorted_data[upper_even_index]\n return_median = (median_lower + median_upper) / 2\n return float(return_median)", "def data_filter(input_array, step):\n mod = input_array.shape[0] % step\n rows = input_array.shape[0] // step\n factor = np.arange(rows)\n if mod:\n in_mat = np.reshape(input_array[:-mod], (rows, -1))\n min_array = np.r_[in_mat.min(axis=1), min(input_array[-mod:])]\n max_array = np.r_[in_mat.max(axis=1), max(input_array[-mod:])]\n median = np.median(in_mat, axis=1)\n median_rest = np.median(input_array[-mod:])\n median_array = np.r_[median, median_rest]\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n min_ind = np.append(min_ind, input_array[-mod:].argmin() + rows * step)\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n max_ind = np.append(max_ind, input_array[-mod:].argmax() + rows * step)\n\n median_trans = np.reshape(median, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n median_ind = np.append(median_ind, abs(\n input_array[-mod:] - median_rest).argmin() + rows * step)\n\n else:\n in_mat = np.reshape(input_array, (input_array.shape[0] // step, -1))\n min_array = in_mat.min(axis=1)\n max_array = in_mat.max(axis=1)\n median_array = np.median(in_mat, axis=1)\n\n # get min, max and average value indices\n min_ind = in_mat.argmin(axis=1)\n min_ind += factor * step\n\n max_ind = in_mat.argmax(axis=1)\n max_ind += factor * step\n\n median_trans = np.reshape(median_array, (rows, -1))\n median_ind = abs(in_mat - median_trans).argmin(axis=1)\n median_ind += factor * step\n\n return min_array, median_array, max_array, min_ind, median_ind, max_ind", "def median(nums):\n\n sorted_nums = sorted(nums)\n num_count = len(nums)\n if num_count % 2 == 0:\n num1 = sorted_nums[num_count / 2 - 1]\n num2 = sorted_nums[num_count / 2]\n return (num1 + num2) / 2.0\n else:\n return sorted_nums[num_count / 2]", "def median(self):\n self.data.sort()\n\n if len(self.data) % 2 == 1:\n median = self.data[int(self.size/2)]\n else:\n median = (self.data[int(self.size/2 - 1)] + \n self.data[int(self.size/2)]) / 2\n return median", "def median(values):\n # Write the median() function\n midpoint = int(len(values) / 2)\n if len(values) % 2 == 0:\n median = (values[midpoint - 1] + values[midpoint]) / 2\n else:\n median = values[midpoint]\n\n return float(median)", "def median(v):\n n = len(v)\n sorted_v = sorted(v)\n midpoint = n // 2\n\n if n % 2 == 1:\n # if odd, return the middle value\n return sorted_v[midpoint]\n else:\n # if even, return the average opf the middle values\n lo = midpoint - 1\n hi = midpoint\n return (sorted_v[lo] + sorted_v[hi]) / 2\n \n median(num_friends)", "def _median_odd(xs: List[float]) -> float:\n return sorted(xs)[len(xs) // 2]", "def mad(arr):\n dev = np.array(arr, copy=True)\n med = np.median(dev, overwrite_input=True)\n return np.abs(arr - med)" ]
[ "0.8438568", "0.78152245", "0.7581761", "0.7321425", "0.72606057", "0.7259399", "0.71910405", "0.71609664", "0.7081089", "0.70748556", "0.70470303", "0.7038986", "0.701688", "0.6998141", "0.69936776", "0.69832367", "0.6974105", "0.6971682", "0.6944141", "0.69430286", "0.693717", "0.6918099", "0.6901589", "0.69009316", "0.68940735", "0.68918705", "0.68909293", "0.68363994", "0.6823579", "0.68227714" ]
0.8658704
0
Test wtmode='tsys', interp='nearest,nearest', dowtsp=True
def testTsysNNSp(self): self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] 
* units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def smoother(df, p, win=30, sd=3):\r\n df1 = df\r\n df1.loc[:, 'dp' + p] = df1.loc[:, p].diff()\r\n df1.loc[:, 'ma' + p] = df1.loc[:, 'dp' + p].rolling(window=win, center=True).mean()\r\n df1.loc[:, 'mst' + p] = df1.loc[:, 'dp' + p].rolling(window=win, center=True).std()\r\n for i in df.index:\r\n try:\r\n if abs(df1.loc[i, 'dp' + p] - df1.loc[i, 'ma' + p]) >= abs(df1.loc[i, 'mst' + p] * sd):\r\n df.loc[i, p] = np.nan\r\n else:\r\n df.loc[i, p] = df.loc[i, p]\r\n except ValueError:\r\n try:\r\n if abs(df1.loc[i, 'dp' + p] - df1.loc[i, 'ma' + p]) >= abs(df1.loc[:, 'dp' + p].std() * sd):\r\n df.loc[i, p] = np.nan\r\n else:\r\n df.loc[i, p] = df.loc[i, p]\r\n except ValueError:\r\n df.loc[i, p] = df.loc[i, p]\r\n\r\n try:\r\n df1 = df1.drop(['dp' + p, 'ma' + p, 'mst' + p], axis=1)\r\n except(NameError, ValueError):\r\n pass\r\n del df1\r\n try:\r\n df = df.drop(['dp' + p, 'ma' + p, 'mst' + p], axis=1)\r\n except(NameError, ValueError):\r\n pass\r\n df = df.interpolate(method='time', limit=30)\r\n df = df[1:-1]\r\n return df", "def perform_stft(x,w,q,n):\n\n #bound = getoptions(options, 'bound', 'per');\n #transform_type = getoptions(options, 'transform_type', 'fourier');\n #normalization = getoptions(options, 'normalization', 'tightframe');\n #window_type = getoptions(options, 'window_type', 'sin');\n #eta = getoptions(options, 'eta', 1);\n \n if np.ndim(x) == 1:\n dir = 1\n else:\n dir = -1\n \n # perform sampling\n X = np.arange(1,n+2,q)\n\n p = len(X)\n eta = 1\n \n if w%2 == 1:\n w = np.ceil((w-1)/2)*2+1\n w1 = (w-1)//2\n dX = np.arange(-w1,w1+1)\n else:\n dX = np.arange(-w//2+1,w//2+1)\n \n X1 = np.tile(X,(w,1)) + np.transpose(np.tile(dX, (p,1)))\n #periodic boundary conditions\n X1 = ((X1-1)%n)+1;\n \n I = X1 -1\n \n # build a sin weight function\n W = .5 *(1 - np.cos(2*np.pi*np.arange(0,w)/(w-1)))\n \n #renormalize the windows\n weight = np.zeros(n)\n \n for i in range(p):\n weight[I[:,i]] = weight[I[:,i]] + W**2\n \n weight = np.sqrt(weight)\n Weight = np.transpose(np.tile(W, (p,1)))\n \n for i in range(p):\n Weight[:,i] = Weight[:,i]/weight[I[:,i]]\n \n #compute the transform\n if dir == 1:\n y = np.zeros([eta*w,p])\n if w%2 == 1:\n m = (eta*w+1)//2\n w1 = (w-1)//2\n sel = np.arange(m-w1,m+w1+1) - 1\n else:\n m = (eta*w)//2+1 \n w1 = w//2\n sel = np.arange(m-w1,m+w1) - 1\n y[sel,:] = x[I]*Weight\n\n #perform the transform\n y = my_transform(y,+1)\n\n else:\n x = my_transform(x,-1)\n x = np.real(x*Weight)\n y = np.zeros(n)\n for i in range(p):\n y[I[:,i]] = y[I[:,i]] + x[:,i]\n\n return y", "def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)", "def interp_n2(t, x, y):\n\n return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])", "def test_twodstats():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, 
None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. 
This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()", "def temporal_smooth(s, sample_rate, tau, hwinlen=20):\n\n t = np.arange(-hwinlen, hwinlen+1) / sample_rate\n w = np.exp(-t**2 / tau)\n w /= w.sum()\n return convolve1d(s, w)", "def t_welch(x, y, tails=2, return_tpdf=False):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x, y = np.asarray(x), np.asarray(y)\n nx, ny = x.size, y.size\n vx, vy = x.var(), y.var()\n df = int((vx/nx + vy/ny)**2 / # Welch-Satterthwaite equation\n ((vx/nx)**2 / (nx - 1) + (vy/ny)**2 / (ny - 1)))\n t_obs = (x.mean() - y.mean()) / np.sqrt(vx/nx + vy/ny)\n p_value = tails * st.t.sf(abs(t_obs), df)\n if return_tpdf:\n return dict(t=t_obs, p=p_value, df=df)\n return TtestResults(t_obs, p_value)", "def test2Samp():\n\n sigmax = 1.0\n sigmay = 3.0\n mux = 0.0\n muy = 3.0\n nx = 10\n ny = 10\n # Update\n np.random.RandomState(0) # set seed to 0\n datax = sigmax * np.random.randn(nx) + mux\n datay = sigmay * np.random.randn(ny) + muy\n datadict = {'x': datax, 'y': datay}\n ranksums(datadict, dataLabel='Test Rank Sums (scipy)')\n ranksums(datadict, dataLabel='Test Rank Sums, Paired (scipy)', paired=True)\n ttest(datadict, dataLabel='Standard t-test (scipy)', \n textline=True, decimals=3, units='mV')\n ttest(datadict, dataLabel='Standard t-test (scipy), paired', paired=True,\n textline=True, decimals=3)\n (p, n) = permTS(datadict, dataLabel='R permTS')\n permutation(datadict, dataLabel='Test simple permute')\n KS(datadict, dataLabel='Test with KS')", "def test_twostep_tapering(self):\n qubit_op = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"IZ\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"ZZ\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n z2_symmetries = Z2Symmetries.find_z2_symmetries(qubit_op)\n converted_op_firststep = z2_symmetries.convert_clifford(qubit_op)\n tapered_op_secondstep = z2_symmetries.taper_clifford(converted_op_firststep)\n\n with self.subTest(\"Check first step: Clifford transformation\"):\n converted_op_expected = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"ZX\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"IX\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n\n self.assertEqual(converted_op_expected, converted_op_firststep)\n\n with self.subTest(\"Check second step: Tapering\"):\n tapered_op = z2_symmetries.taper(qubit_op)\n self.assertEqual(tapered_op, tapered_op_secondstep)", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] 
* units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)", "def time_windows(baz, arriv_p, arriv_s, init_sec, is_local):\n\n # TIME WINDOWS (for arrivals and subplots)\n # Window lengths dependent on event distance\n if is_local == 'non-local':\n min_pw = arriv_p\n max_pw = min_pw + (arriv_s - arriv_p) // 4\n min_sw = arriv_s - 0.001 * (arriv_s - arriv_p)\n max_sw = arriv_s + 150\n min_lwi = surf_tts(baz[0], init_sec) - 20\n t1 = (baz[0]/1000000) * 50\n # window length grows 50 sec per 1000 km.\n max_lwi = min_lwi + t1\n min_lwf = max_lwi\n t2 = (baz[0]/1000000) * 60\n # window length grows 60 sec per 1000 km.\n max_lwf = min_lwf + t2\n elif is_local == 'local':\n min_pw = arriv_p\n max_pw = min_pw + 20\n min_sw = arriv_s - 5\n max_sw = min_sw + 20\n min_lwi = surf_tts(baz[0], init_sec) + 20\n max_lwi = min_lwi + 50\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n else:\n min_pw = arriv_p\n max_pw = min_pw + 7\n min_sw = arriv_s\n max_sw = min_sw + 7\n min_lwi = surf_tts(baz[0], init_sec) + 5\n max_lwi = min_lwi + 12\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n\n return min_pw, max_pw, min_sw, max_sw, min_lwi, max_lwi, min_lwf, max_lwf", "def test_fit_temp_sta():\n time_raw = np.linspace(-0.5, 0, 30)\n time_fit = np.linspace(-0.5, 0, 30)\n params = {'amp1': 0.5, 'amp2': 0.9, 'tau1': -0.2, 'tau2': -0.1, 'n': 7}\n\n np.random.seed(0)\n sta_noise = (np.random.rand(30,)-0.5)/5\n test_model = fitt.two_cascades(params, time_raw)\n test_raw = test_model+sta_noise\n params_fit, test_fit = fitt.fit_temp_sta(test_raw, time_raw, time_fit)\n\n assert np.all(np.abs(test_fit - test_model) <= 0.05), \\\n 'Difference between model and fitting more than 5% by negative time'\n\n time_raw = np.linspace(0, 0.5, 30)\n time_fit = np.linspace(0, 0.5, 30)\n params = {'amp1': 0.5, 'amp2': 0.9, 'tau1': 0.2, 'tau2': 0.1, 'n': 7}\n\n np.random.seed(0)\n sta_noise = (np.random.rand(30,)-0.5)/5\n test_model = fitt.two_cascades(params, time_raw)\n test_raw = test_model+sta_noise\n params_fit, test_fit = fitt.fit_temp_sta(test_raw, time_raw, time_fit)\n\n assert np.all(np.abs(test_fit - test_model) <= 0.05), \\\n 'Difference between model and fitting more than 5% by positive time'", "def calc_tolerance(wt):\n return 1 - wt", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, 
-143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)", "def test_ssq_stft():\n th = 1e-1\n for N in (128, 129):\n x = np.random.randn(N)\n for n_fft in (120, 121):\n for window_scaling in (1., .5):\n if window_scaling == 1:\n window = None\n else:\n window = get_window(window, win_len=n_fft//1, n_fft=n_fft)\n window *= window_scaling\n\n Sx, *_ = ssq_stft(x, window=window, n_fft=n_fft)\n xr = issq_stft(Sx, window=window, n_fft=n_fft)\n\n txt = (\"\\nSSQ_STFT: (N, n_fft, window_scaling) = ({}, {}, {})\"\n ).format(N, n_fft, window_scaling)\n assert len(x) == len(xr), \"%s != %s %s\" % (N, len(xr), txt)\n mae = np.abs(x - xr).mean()\n assert mae < th, \"MAE = %.2e > %.2e %s\" % (mae, th, txt)", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def test_scipy_resample():\r\n # create a freq list with max freq < 16 Hz\r\n freq_list = np.random.randint(0, high=15, size=5)\r\n # make a test signal with sampling freq = 64 Hz\r\n a = [np.sin(2 * np.pi * f * np.linspace(0, 1, 64, endpoint=False))\r\n for f in freq_list]\r\n tst = np.array(a).sum(axis=0)\r\n # interpolate to 128 Hz sampling\r\n t_up = signaltools.resample(tst, 128)\r\n np.testing.assert_array_almost_equal(t_up[::2], tst)\r\n # downsample to 32 Hz\r\n t_dn = signaltools.resample(tst, 32)\r\n np.testing.assert_array_almost_equal(t_dn, tst[::2])\r\n\r\n # downsample to 48 Hz, and compute the sampling analytically for comparison\r\n dn_samp_ana = np.array([np.sin(2 * np.pi * f * np.linspace(0, 1, 48, endpoint=False))\r\n for f in freq_list]).sum(axis=0)\r\n t_dn2 = signaltools.resample(tst, 48)\r\n npt.assert_array_almost_equal(t_dn2, dn_samp_ana)" ]
[ "0.69159997", "0.6296606", "0.6176141", "0.6069002", "0.5954954", "0.5914424", "0.58652925", "0.57686806", "0.5644779", "0.5482096", "0.5315107", "0.5299039", "0.52878356", "0.5247621", "0.5207398", "0.5188795", "0.5156379", "0.5139908", "0.51383054", "0.5126579", "0.51224476", "0.50972027", "0.5091355", "0.50651294", "0.50627095", "0.50378835", "0.50265026", "0.50223917", "0.5016907", "0.5008402" ]
0.6856138
1
Test wtmode='tinttsys', interp='nearest,nearest', dowtsp=True
def testTinttsysNNSp(self):
    self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def test_twostep_tapering(self):\n qubit_op = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"IZ\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"ZZ\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n z2_symmetries = 
Z2Symmetries.find_z2_symmetries(qubit_op)\n converted_op_firststep = z2_symmetries.convert_clifford(qubit_op)\n tapered_op_secondstep = z2_symmetries.taper_clifford(converted_op_firststep)\n\n with self.subTest(\"Check first step: Clifford transformation\"):\n converted_op_expected = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"ZX\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"IX\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n\n self.assertEqual(converted_op_expected, converted_op_firststep)\n\n with self.subTest(\"Check second step: Tapering\"):\n tapered_op = z2_symmetries.taper(qubit_op)\n self.assertEqual(tapered_op, tapered_op_secondstep)", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def calc_tolerance(wt):\n return 1 - wt", "def interp_n2(t, x, y):\n\n return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])", "def smoother(df, p, win=30, sd=3):\r\n df1 = df\r\n df1.loc[:, 'dp' + p] = df1.loc[:, p].diff()\r\n df1.loc[:, 'ma' + p] = df1.loc[:, 'dp' + p].rolling(window=win, center=True).mean()\r\n df1.loc[:, 'mst' + p] = df1.loc[:, 'dp' + p].rolling(window=win, center=True).std()\r\n for i in df.index:\r\n try:\r\n if abs(df1.loc[i, 'dp' + p] - df1.loc[i, 'ma' + p]) >= abs(df1.loc[i, 'mst' + p] * sd):\r\n df.loc[i, p] = np.nan\r\n else:\r\n df.loc[i, p] = df.loc[i, p]\r\n except ValueError:\r\n try:\r\n if abs(df1.loc[i, 'dp' + p] - df1.loc[i, 'ma' + p]) >= abs(df1.loc[:, 'dp' + p].std() * sd):\r\n df.loc[i, p] = np.nan\r\n else:\r\n df.loc[i, p] = df.loc[i, p]\r\n except ValueError:\r\n df.loc[i, p] = df.loc[i, p]\r\n\r\n try:\r\n df1 = df1.drop(['dp' + p, 'ma' + p, 'mst' + p], axis=1)\r\n except(NameError, ValueError):\r\n pass\r\n del df1\r\n try:\r\n df = df.drop(['dp' + p, 'ma' + p, 'mst' + p], axis=1)\r\n except(NameError, ValueError):\r\n pass\r\n df = df.interpolate(method='time', limit=30)\r\n df = df[1:-1]\r\n return df", "def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)", "def test_twodstats():\n if __name__ == 
'__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. 
This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()", "def perform_stft(x,w,q,n):\n\n #bound = getoptions(options, 'bound', 'per');\n #transform_type = getoptions(options, 'transform_type', 'fourier');\n #normalization = getoptions(options, 'normalization', 'tightframe');\n #window_type = getoptions(options, 'window_type', 'sin');\n #eta = getoptions(options, 'eta', 1);\n \n if np.ndim(x) == 1:\n dir = 1\n else:\n dir = -1\n \n # perform sampling\n X = np.arange(1,n+2,q)\n\n p = len(X)\n eta = 1\n \n if w%2 == 1:\n w = np.ceil((w-1)/2)*2+1\n w1 = (w-1)//2\n dX = np.arange(-w1,w1+1)\n else:\n dX = np.arange(-w//2+1,w//2+1)\n \n X1 = np.tile(X,(w,1)) + np.transpose(np.tile(dX, (p,1)))\n #periodic boundary conditions\n X1 = ((X1-1)%n)+1;\n \n I = X1 -1\n \n # build a sin weight function\n W = .5 *(1 - np.cos(2*np.pi*np.arange(0,w)/(w-1)))\n \n #renormalize the windows\n weight = np.zeros(n)\n \n for i in range(p):\n weight[I[:,i]] = weight[I[:,i]] + W**2\n \n weight = np.sqrt(weight)\n Weight = np.transpose(np.tile(W, (p,1)))\n \n for i in range(p):\n Weight[:,i] = Weight[:,i]/weight[I[:,i]]\n \n #compute the transform\n if dir == 1:\n y = np.zeros([eta*w,p])\n if w%2 == 1:\n m = (eta*w+1)//2\n w1 = (w-1)//2\n sel = np.arange(m-w1,m+w1+1) - 1\n else:\n m = (eta*w)//2+1 \n w1 = w//2\n sel = np.arange(m-w1,m+w1) - 1\n y[sel,:] = x[I]*Weight\n\n #perform the transform\n y = my_transform(y,+1)\n\n else:\n x = my_transform(x,-1)\n x = np.real(x*Weight)\n y = np.zeros(n)\n for i in range(p):\n y[I[:,i]] = y[I[:,i]] + x[:,i]\n\n return y", "def test_isentropic_pressure_additional_args():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. * units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_isentropic_pressure():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmp[:, :, -1] = np.nan\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = np.ones((1, 5, 5)) * (1000. * units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def test_isentropic_pressure_addition_args_interp():\n lev = [100000., 95000., 90000., 85000.] 
* units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 69.197 * units.percent\n assert_almost_equal(isentprs[1][1], truerh, 3)", "def test_isentropic_interpolation_dataarray():\n temp = xr.DataArray([[[296.]], [[292.]], [[290.]], [[288.]]] * units.K,\n dims=('isobaric', 'y', 'x'),\n coords={'isobaric': (('isobaric',), [1000., 950., 900., 850.],\n {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n rh = xr.DataArray([[[100.]], [[80.]], [[40.]], [[20.]]] * units.percent,\n dims=('isobaric', 'y', 'x'), coords={\n 'isobaric': (('isobaric',), [1000., 950., 900., 850.], {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n isentlev = [296., 297.] * units.kelvin\n press, rh_interp = isentropic_interpolation(isentlev, temp.isobaric, temp, rh)\n\n assert_array_almost_equal(press, np.array([[[1000.]], [[936.213]]]) * units.hPa, 3)\n assert_array_almost_equal(rh_interp, np.array([[[100.]], [[69.19706]]]) * units.percent, 3)", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)", "def temporal_smooth(s, sample_rate, tau, hwinlen=20):\n\n t = np.arange(-hwinlen, hwinlen+1) / sample_rate\n w = np.exp(-t**2 / tau)\n w /= w.sum()\n return convolve1d(s, w)", "def t_welch(x, y, tails=2, return_tpdf=False):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x, y = np.asarray(x), np.asarray(y)\n nx, ny = x.size, y.size\n vx, vy = x.var(), y.var()\n df = int((vx/nx + vy/ny)**2 / # Welch-Satterthwaite equation\n ((vx/nx)**2 / (nx - 1) + (vy/ny)**2 / (ny - 1)))\n t_obs = (x.mean() - y.mean()) / np.sqrt(vx/nx + vy/ny)\n p_value = tails * st.t.sf(abs(t_obs), df)\n if return_tpdf:\n return dict(t=t_obs, p=p_value, df=df)\n return TtestResults(t_obs, p_value)", "def test_fit_temp_sta():\n time_raw = np.linspace(-0.5, 0, 30)\n time_fit = np.linspace(-0.5, 0, 30)\n params = {'amp1': 0.5, 'amp2': 0.9, 'tau1': -0.2, 'tau2': -0.1, 'n': 7}\n\n np.random.seed(0)\n sta_noise = (np.random.rand(30,)-0.5)/5\n test_model = fitt.two_cascades(params, time_raw)\n test_raw = test_model+sta_noise\n params_fit, test_fit = fitt.fit_temp_sta(test_raw, time_raw, time_fit)\n\n assert np.all(np.abs(test_fit - test_model) <= 0.05), \\\n 'Difference between model and fitting more than 5% by negative time'\n\n time_raw = np.linspace(0, 0.5, 30)\n time_fit = np.linspace(0, 0.5, 30)\n params = {'amp1': 0.5, 'amp2': 0.9, 'tau1': 0.2, 'tau2': 0.1, 'n': 7}\n\n 
np.random.seed(0)\n sta_noise = (np.random.rand(30,)-0.5)/5\n test_model = fitt.two_cascades(params, time_raw)\n test_raw = test_model+sta_noise\n params_fit, test_fit = fitt.fit_temp_sta(test_raw, time_raw, time_fit)\n\n assert np.all(np.abs(test_fit - test_model) <= 0.05), \\\n 'Difference between model and fitting more than 5% by positive time'", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])" ]
[ "0.64893264", "0.6471385", "0.63866085", "0.6262443", "0.6248298", "0.57996994", "0.5792964", "0.57056886", "0.56932837", "0.55488276", "0.5546918", "0.5420293", "0.5406579", "0.5366721", "0.5271492", "0.52703965", "0.5220601", "0.5171412", "0.5167025", "0.51180923", "0.5101126", "0.5097682", "0.50948006", "0.50911194", "0.5072846", "0.50443697", "0.5041333", "0.5034596", "0.5019858", "0.50097346" ]
0.7156723
0
Test wtmode='tinttsys', interp='linear,cspline', dowtsp=True
def testTinttsysLCSp(self):
    self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapLC(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysLCSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,cspline')", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTsysLLSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,linear')", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n 
pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def testTsysMapLCSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def spliner(x, y, k=3, sig=5, s=None, fev=100, w=None, clip='both', \\\n verbose=False, plotfit=False, plotall=False, diag=False):\n # 2010-07-05 13:51 IJC: Adapted from polyfitr\n from numpy import polyfit, polyval, isfinite, ones\n from scipy import interpolate\n from pylab import plot, legend, title\n\n xx = array(x, copy=True)\n yy = array(y, copy=True)\n noweights = (w==None)\n if noweights:\n ww = ones(xx.shape, float)\n else:\n ww = array(w, copy=True)\n\n #ww = 1./err**2\n\n ii = 0\n nrej = 1\n\n goodind = isfinite(xx)*isfinite(yy)*isfinite(ww)\n \n #xx = xx[goodind]\n #yy = yy[goodind]\n #ww = ww[goodind]\n\n while (ii<fev and (nrej<>0)):\n spline = interpolate.UnivariateSpline(xx[goodind],yy[goodind],w=ww[goodind],s=s,k=k)\n residual = yy[goodind] - spline(xx[goodind])\n stdResidual = std(residual)\n #if verbose: print stdResidual\n if clip=='both':\n ind = abs(residual) <= (sig*stdResidual) \n elif clip=='above':\n ind = residual < sig*stdResidual\n elif clip=='below':\n ind = residual > -sig*stdResidual\n else:\n ind = ones(residual.shape, bool)\n\n goodind *= ind\n #xx = xx[ind]\n #yy = yy[ind]\n #ww = ww[ind]\n\n ii += 1\n nrej = len(residual) - len(xx)\n if plotall:\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Iter. 
#' + str(ii) + ' -- Close all windows to continue....')\n\n if verbose:\n print str(len(x)-len(xx[goodind])) + ' points rejected on iteration #' + str(ii)\n\n if (plotfit or plotall):\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Close window to continue....')\n\n if diag:\n chisq = ( (residual)**2 / yy )[goodind].sum()\n spline = (spline, chisq, ii, goodind)\n\n return spline", "def perform_stft(x,w,q,n):\n\n #bound = getoptions(options, 'bound', 'per');\n #transform_type = getoptions(options, 'transform_type', 'fourier');\n #normalization = getoptions(options, 'normalization', 'tightframe');\n #window_type = getoptions(options, 'window_type', 'sin');\n #eta = getoptions(options, 'eta', 1);\n \n if np.ndim(x) == 1:\n dir = 1\n else:\n dir = -1\n \n # perform sampling\n X = np.arange(1,n+2,q)\n\n p = len(X)\n eta = 1\n \n if w%2 == 1:\n w = np.ceil((w-1)/2)*2+1\n w1 = (w-1)//2\n dX = np.arange(-w1,w1+1)\n else:\n dX = np.arange(-w//2+1,w//2+1)\n \n X1 = np.tile(X,(w,1)) + np.transpose(np.tile(dX, (p,1)))\n #periodic boundary conditions\n X1 = ((X1-1)%n)+1;\n \n I = X1 -1\n \n # build a sin weight function\n W = .5 *(1 - np.cos(2*np.pi*np.arange(0,w)/(w-1)))\n \n #renormalize the windows\n weight = np.zeros(n)\n \n for i in range(p):\n weight[I[:,i]] = weight[I[:,i]] + W**2\n \n weight = np.sqrt(weight)\n Weight = np.transpose(np.tile(W, (p,1)))\n \n for i in range(p):\n Weight[:,i] = Weight[:,i]/weight[I[:,i]]\n \n #compute the transform\n if dir == 1:\n y = np.zeros([eta*w,p])\n if w%2 == 1:\n m = (eta*w+1)//2\n w1 = (w-1)//2\n sel = np.arange(m-w1,m+w1+1) - 1\n else:\n m = (eta*w)//2+1 \n w1 = w//2\n sel = np.arange(m-w1,m+w1) - 1\n y[sel,:] = x[I]*Weight\n\n #perform the transform\n y = my_transform(y,+1)\n\n else:\n x = my_transform(x,-1)\n x = np.real(x*Weight)\n y = np.zeros(n)\n for i in range(p):\n y[I[:,i]] = y[I[:,i]] + x[:,i]\n\n return y", "def Bootstrap_TS(x, y):\n\n ycrv_swap = ycrv_construct(x,y)\n splf = get_spot_rates(ycrv_swap, Thirty360(), TARGET(), 1+len(y)*12)\n\n return splf", "def kinetics_eo_smooth(data):\n new_data = {}\n # Generate new_data\n for kw in data:\n if kw.startswith('t') == False:\n sigma = int(len(data[kw]) / 15) + 1\n new_data[kw] = gaussian_filter1d(data[kw], sigma)\n# new_data[kw] = uniform_filter1d(data[kw], sigma) \n else:\n new_data[kw] = data[kw]\n \n # plot new_data\n fig, ax1 = plt.subplots(dpi=300)\n \n color = 'black'\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(new_data['t0'], new_data['alpha'], color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n \n color = wowcolor(2)\n ax2 = ax1.twinx()\n ax2.set_ylabel('$E$ [$\\mu$m$^2$/s$^2$]', color=color)\n ax2.plot(new_data['t2'], new_data['E'], color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n \n color = wowcolor(8)\n ax3 = ax1.twinx()\n ax3.set_ylabel('$OP$', color=color)\n ax3.plot(new_data['t2'], new_data['OP'], color=color)\n ax3.tick_params(axis='y', labelcolor=color)\n ax3.spines[\"right\"].set_position((\"axes\", 1.2))\n \n ax = [ax1, ax2, ax3]\n \n return new_data, fig, ax", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def setSplineMode(order=3,npts=200):\n dislin.splmod(order,npts)", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# 
changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)", "def fit_spline(x, y, **kwargs):\n xf, yf = get_finite(x,y)\n iisort = np.argsort(xf)\n return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)", "def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n\n #self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()", "def smooth_series(y,p = 6.25):\n cycle, trend = sm.tsa.filters.hpfilter(y, p)\n return trend", "def test_synth_simple():\n \n twd = tempfile.mkdtemp(dir=os.getcwd()+\"/tmp\")\n print(twd)\n \n wmin, wmax, dwl = 6700, 6720, 0.01\n ll = turbopy.TSLineList(os.path.join(data_path, \"vald-6700-6720.list\"))\n atmo = turbopy.MARCSModel.load(os.path.join(data_path, \"sun.mod\"))\n atmo.Teff = 5777\n atmo.logg = 4.44\n atmo.MH = 0.0\n atmo.AM = 0.0\n wave, norm, flux = turbopy.run_synth(wmin, wmax, dwl,\n atmosphere=atmo, vt=1.0,\n linelist=ll, twd=twd)\n \n wave2, norm2, flux2 = turbopy.run_synth(wmin, wmax, dwl,\n [12.0, 0.4], [6.0, 1.0], [8.0, 1.0],\n atmosphere=atmo, vt=1.0,\n linelist=ll, twd=twd)\n \"\"\"\n import matplotlib.pyplot as plt\n fig = plt.figure()\n plt.plot(wave, norm, 'k-', lw=3)\n plt.plot(wave, norm2, 'r-', lw=1)\n fig.savefig(\"test3.pdf\")\n plt.close(fig)\n \"\"\"", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)" ]
[ "0.6915214", "0.6593633", "0.6186842", "0.6147206", "0.6009761", "0.5953264", "0.5819722", "0.56637067", "0.56173426", "0.560701", "0.558406", "0.54812276", "0.5471691", "0.5468226", "0.5444616", "0.5434842", "0.5378455", "0.5334122", "0.528345", "0.51883113", "0.5174286", "0.5144214", "0.5139434", "0.51369673", "0.5125318", "0.51021445", "0.5084083", "0.5082134", "0.5075914", "0.50741017" ]
0.6803435
1
Test spwmap wtmode='tinttsys', interp='nearest,nearest'
def testTinttsysMapNN(self):
    self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapLC(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def testTransformBasedWarp(self):\n for interpLength in (0, 1, 2, 4):\n kernelName = \"lanczos3\"\n rtol = 4e-5\n atol = 1e-2\n warpingControl = afwMath.WarpingControl(\n warpingKernelName=kernelName,\n interpLength=interpLength,\n )\n\n originalExposure = afwImage.ExposureF(originalExposurePath)\n originalMetadata = afwImage.DecoratedImageF(originalExposurePath).getMetadata()\n originalSkyWcs = afwGeom.makeSkyWcs(originalMetadata)\n\n swarpedImageName = f\"medswarp1{kernelName}.fits\"\n swarpedImagePath = os.path.join(dataDir, swarpedImageName)\n swarpedDecoratedImage = afwImage.DecoratedImageF(swarpedImagePath)\n swarpedImage = swarpedDecoratedImage.getImage()\n\n swarpedMetadata = swarpedDecoratedImage.getMetadata()\n warpedSkyWcs = afwGeom.makeSkyWcs(swarpedMetadata)\n\n # original image is source, warped image is destination\n srcToDest = afwGeom.makeWcsPairTransform(originalSkyWcs, warpedSkyWcs)\n\n afwWarpedMaskedImage = afwImage.MaskedImageF(swarpedImage.getDimensions())\n originalMaskedImage = originalExposure.getMaskedImage()\n\n numGoodPix = afwMath.warpImage(afwWarpedMaskedImage, originalMaskedImage,\n srcToDest, warpingControl)\n self.assertGreater(numGoodPix, 50)\n\n afwWarpedImage = afwWarpedMaskedImage.getImage()\n afwWarpedImageArr = afwWarpedImage.getArray()\n noDataMaskArr = np.isnan(afwWarpedImageArr)\n self.assertImagesAlmostEqual(afwWarpedImage, swarpedImage,\n 
skipMask=noDataMaskArr, rtol=rtol, atol=atol)", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):\n\n #indices of the map that fit the dmsp map\n indices = scipy.interpolate.griddata((X_map,Y_map), np.arange(len(X_map.flatten())), (X_dmsp,Y_dmsp), method = 'nearest')\n\n #get mask for map elements that are within distance tolerance \n mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] 
- Y_dmsp) < tolerance)\n\n return indices,mask", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testMatchSwarpBilinearImage(self):\n self.compareToSwarp(\"bilinear\", useWarpExposure=False, atol=0.15)", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def test_interpolate_to_grid_pandas():\n df = pd.DataFrame({\n 'lat': [38, 39, 31, 30, 41, 35],\n 'lon': [-106, -105, -86, -96, -74, -70],\n 'tmp': [-10, -16, 13, 16, 0, 3.5]\n }, index=[1, 2, 3, 4, 5, 6])\n interpolate_to_grid(\n df['lon'], df['lat'], df['tmp'],\n interp_type='natural_neighbor', hres=0.6)", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))", "def testMatchSwarpBilinearExposure(self):\n self.compareToSwarp(\"bilinear\", useWarpExposure=True,\n useSubregion=False, useDeepCopy=True)", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] 
* units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)", "def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def testNullWcs(self, interpLength=10):\n exposureWithWcs = afwImage.ExposureF(originalExposurePath)\n mi = exposureWithWcs.getMaskedImage()\n exposureWithoutWcs = afwImage.ExposureF(mi.getDimensions())\n warpingControl = afwMath.WarpingControl(\n \"bilinear\", \"\", 0, interpLength)\n\n with self.assertRaises(pexExcept.InvalidParameterError):\n afwMath.warpExposure(exposureWithWcs, exposureWithoutWcs, warpingControl)\n\n with self.assertRaises(pexExcept.InvalidParameterError):\n afwMath.warpExposure(exposureWithoutWcs, exposureWithWcs, warpingControl)", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)" ]
[ "0.7464721", "0.694387", "0.6688153", "0.6513818", "0.6375435", "0.6351056", "0.62163407", "0.5984313", "0.59639287", "0.59157306", "0.582448", "0.56327325", "0.5543462", "0.54693025", "0.54282254", "0.540566", "0.53477836", "0.53318286", "0.5310574", "0.52897173", "0.5219775", "0.5193221", "0.51811653", "0.51810634", "0.5146438", "0.512847", "0.51031643", "0.51001614", "0.5066941", "0.5057871" ]
0.7247381
1
Test spwmap wtmode='tinttsys', interp='linear,cspline'
def testTinttsysMapLC(self): self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapLCSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysLCSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,cspline')", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def testDoubleMIP(self):\n\n self.M.render(self.testoutput[2], wide=True)", "def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 
2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def test_cubic_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='cubic')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='cubic')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred()\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def test_interpolation():\n spiral_arm = survey.get_spiral_slice(track = \"perseus\", \n interpolate = True)\n spiral_arm2 = survey.get_spiral_slice(track = \"Per\", \n interpolate = False)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def setSplineMode(order=3,npts=200):\n dislin.splmod(order,npts)", "def apply(lut, courbe):\n fct = interp1d(courbe[::2], courbe[1::2])\n if len(courbe) > 4:\n order, value = (\n [(2, 0)],\n [(2, 0)],\n ) # natural spline boundary conditions\n fct = make_interp_spline(\n courbe[::2], courbe[1::2], k=3, bc_type=(order, value)\n )\n for level in range(256):\n lut[level] = np.round(fct(lut[level]))", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n 
tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)", "def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty", "def spliner(x, y, k=3, sig=5, s=None, fev=100, w=None, clip='both', \\\n verbose=False, plotfit=False, plotall=False, diag=False):\n # 2010-07-05 13:51 IJC: Adapted from polyfitr\n from numpy import polyfit, polyval, isfinite, ones\n from scipy import interpolate\n from pylab import plot, legend, title\n\n xx = array(x, copy=True)\n yy = array(y, copy=True)\n noweights = (w==None)\n if noweights:\n ww = ones(xx.shape, float)\n else:\n ww = array(w, copy=True)\n\n #ww = 1./err**2\n\n ii = 0\n nrej = 1\n\n goodind = 
isfinite(xx)*isfinite(yy)*isfinite(ww)\n \n #xx = xx[goodind]\n #yy = yy[goodind]\n #ww = ww[goodind]\n\n while (ii<fev and (nrej<>0)):\n spline = interpolate.UnivariateSpline(xx[goodind],yy[goodind],w=ww[goodind],s=s,k=k)\n residual = yy[goodind] - spline(xx[goodind])\n stdResidual = std(residual)\n #if verbose: print stdResidual\n if clip=='both':\n ind = abs(residual) <= (sig*stdResidual) \n elif clip=='above':\n ind = residual < sig*stdResidual\n elif clip=='below':\n ind = residual > -sig*stdResidual\n else:\n ind = ones(residual.shape, bool)\n\n goodind *= ind\n #xx = xx[ind]\n #yy = yy[ind]\n #ww = ww[ind]\n\n ii += 1\n nrej = len(residual) - len(xx)\n if plotall:\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Iter. #' + str(ii) + ' -- Close all windows to continue....')\n\n if verbose:\n print str(len(x)-len(xx[goodind])) + ' points rejected on iteration #' + str(ii)\n\n if (plotfit or plotall):\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Close window to continue....')\n\n if diag:\n chisq = ( (residual)**2 / yy )[goodind].sum()\n spline = (spline, chisq, ii, goodind)\n\n return spline", "def _splineloc(self, coa_map, win=5, upscale=10):\n\n # Get shape of 3-D coalescence map\n nx, ny, nz = coa_map.shape\n n = np.array([nx, ny, nz])\n\n # Find maximum coalescence location in grid\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n i = np.array([mx, my, mz])\n\n # Determining window about maximum value and trimming coa grid\n w2 = (win - 1)//2\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n # If subgrid is not close to the edge\n if (x2 - x1) == (y2 - y1) == (z2 - z1):\n coa_map_trim = coa_map[x1:x2, y1:y2, z1:z2]\n\n # Defining the original interpolation function\n xo = np.linspace(0, coa_map_trim.shape[0] - 1,\n coa_map_trim.shape[0])\n yo = np.linspace(0, coa_map_trim.shape[1] - 1,\n coa_map_trim.shape[1])\n zo = np.linspace(0, coa_map_trim.shape[2] - 1,\n coa_map_trim.shape[2])\n xog, yog, zog = np.meshgrid(xo, yo, zo)\n interpgrid = Rbf(xog.flatten(), yog.flatten(), zog.flatten(),\n coa_map_trim.flatten(),\n function=\"cubic\")\n\n # Creating the new interpolated grid\n xx = np.linspace(0, coa_map_trim.shape[0] - 1,\n (coa_map_trim.shape[0] - 1) * upscale + 1)\n yy = np.linspace(0, coa_map_trim.shape[1] - 1,\n (coa_map_trim.shape[1] - 1) * upscale + 1)\n zz = np.linspace(0, coa_map_trim.shape[2] - 1,\n (coa_map_trim.shape[2] - 1) * upscale + 1)\n xxg, yyg, zzg = np.meshgrid(xx, yy, zz)\n\n # Interpolate spline function on new grid\n coa_map_int = interpgrid(xxg.flatten(), yyg.flatten(),\n zzg.flatten()).reshape(xxg.shape)\n\n # Calculate max coalescence location on interpolated grid\n mxi, myi, mzi = np.unravel_index(np.nanargmax(coa_map_int),\n coa_map_int.shape)\n mxi = mxi/upscale + x1\n myi = myi/upscale + y1\n mzi = mzi/upscale + z1\n self.output.log(\"\\t\\tGridded loc: {} {} {}\".format(mx, my, mz), self.log)\n self.output.log(\"\\t\\tSpline loc: {} {} {}\".format(mxi, myi, mzi), self.log)\n\n # Run check that spline location is within grid-cell\n if (abs(mx - mxi) > 1) or (abs(my - myi) > 1) or \\\n (abs(mz - mzi) > 1):\n msg = \"\\tSpline warning: spline location outside grid cell\"\n msg += \"with maximum coalescence value\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mxi, myi, mzi]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n # 
Run check that spline location is within window\n if (abs(mx - mxi) > w2) or (abs(my - myi) > w2) or \\\n (abs(mz - mzi) > w2):\n msg = \"\\t !!!! Spline error: location outside interpolation \"\n msg += \"window !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n else:\n msg = \"\\t !!!! Spline error: interpolation window crosses edge of \"\n msg += \"grid !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n return loc_spline", "def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp" ]
[ "0.7193978", "0.68954045", "0.67678285", "0.645622", "0.64222103", "0.6354744", "0.63472885", "0.62745875", "0.6229267", "0.6142785", "0.61396414", "0.5909097", "0.56487125", "0.5501776", "0.54098845", "0.53817445", "0.53567433", "0.52819806", "0.5244804", "0.5152128", "0.515077", "0.50746024", "0.5027545", "0.5011284", "0.50016546", "0.4988849", "0.49802378", "0.49587792", "0.49482802", "0.49471328" ]
0.6927376
1
Test spwmap wtmode='tsys', interp='nearest,nearest', dowtsp=True
def testTsysMapNNSp(self): self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapLCSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLC(self):\n 
self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def time_windows(baz, arriv_p, arriv_s, init_sec, is_local):\n\n # TIME WINDOWS (for arrivals and subplots)\n # Window lengths dependent on event distance\n if is_local == 'non-local':\n min_pw = arriv_p\n max_pw = min_pw + (arriv_s - arriv_p) // 4\n min_sw = arriv_s - 0.001 * (arriv_s - arriv_p)\n max_sw = arriv_s + 150\n min_lwi = surf_tts(baz[0], init_sec) - 20\n t1 = (baz[0]/1000000) * 50\n # window length grows 50 sec per 1000 km.\n max_lwi = min_lwi + t1\n min_lwf = max_lwi\n t2 = (baz[0]/1000000) * 60\n # window length grows 60 sec per 1000 km.\n max_lwf = min_lwf + t2\n elif is_local == 'local':\n min_pw = arriv_p\n max_pw = min_pw + 20\n min_sw = arriv_s - 5\n max_sw = min_sw + 20\n min_lwi = surf_tts(baz[0], init_sec) + 20\n max_lwi = min_lwi + 50\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n else:\n min_pw = arriv_p\n max_pw = min_pw + 7\n min_sw = arriv_s\n max_sw = min_sw + 7\n min_lwi = surf_tts(baz[0], init_sec) + 5\n max_lwi = min_lwi + 12\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n\n return min_pw, max_pw, min_sw, max_sw, min_lwi, max_lwi, min_lwf, max_lwf", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def times_map(self, value):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n if not isinstance(value, TropicalWeight):\n value = TropicalWeight(value)\n openfst.ArcMap(self.fst[0], result.fst,\n openfst.TimesStdArcMapper((<TropicalWeight> value).weight[0]))\n return result", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def test2Samp():\n\n sigmax = 1.0\n sigmay = 3.0\n mux = 0.0\n muy = 3.0\n nx = 10\n ny = 10\n # Update\n np.random.RandomState(0) # set seed to 0\n datax = sigmax * np.random.randn(nx) + mux\n datay = sigmay * np.random.randn(ny) + muy\n datadict = {'x': datax, 'y': datay}\n ranksums(datadict, dataLabel='Test 
Rank Sums (scipy)')\n ranksums(datadict, dataLabel='Test Rank Sums, Paired (scipy)', paired=True)\n ttest(datadict, dataLabel='Standard t-test (scipy)', \n textline=True, decimals=3, units='mV')\n ttest(datadict, dataLabel='Standard t-test (scipy), paired', paired=True,\n textline=True, decimals=3)\n (p, n) = permTS(datadict, dataLabel='R permTS')\n permutation(datadict, dataLabel='Test simple permute')\n KS(datadict, dataLabel='Test with KS')", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def moving_window_spl(data, tvec, wn, s=.5):\n\n posx, posz = data.T\n npts = len(posx)\n spos = np.zeros((npts, 2))\n svel = np.zeros((npts, 2))\n sacc = np.zeros((npts, 2))\n\n for i in range(npts):\n start, stop, at_end = window_bounds(i, npts, wn)\n\n t = tvec[start:stop]\n x = posx[start:stop]\n z = posz[start:stop]\n\n px = interpolate.UnivariateSpline(t, x, k=5, s=s)\n pz = interpolate.UnivariateSpline(t, z, k=5, s=s)\n vx = px.derivative(1)\n vz = pz.derivative(1)\n ax = px.derivative(2)\n az = pz.derivative(2)\n\n tval = tvec[i]\n spos[i] = px(tval), pz(tval)\n svel[i] = vx(tval), vz(tval)\n sacc[i] = ax(tval), az(tval)\n\n return spos, svel, sacc", "def wt_time_Locs(wt, loc):\n return (wt * loc)", "def testTsysLLSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,linear')", "def test_plt_v2offset_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_v2offset_time()\n\n assert bokeh_plot_type == type(result)", "def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):\n\n #indices of the map that fit the dmsp map\n indices = scipy.interpolate.griddata((X_map,Y_map), np.arange(len(X_map.flatten())), (X_dmsp,Y_dmsp), method = 'nearest')\n\n #get mask for map elements that are within distance tolerance \n mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] - Y_dmsp) < tolerance)\n\n return indices,mask", "def testTransformBasedWarp(self):\n for interpLength in (0, 1, 2, 4):\n kernelName = \"lanczos3\"\n rtol = 4e-5\n atol = 1e-2\n warpingControl = afwMath.WarpingControl(\n warpingKernelName=kernelName,\n interpLength=interpLength,\n )\n\n originalExposure = afwImage.ExposureF(originalExposurePath)\n originalMetadata = afwImage.DecoratedImageF(originalExposurePath).getMetadata()\n originalSkyWcs = afwGeom.makeSkyWcs(originalMetadata)\n\n swarpedImageName = f\"medswarp1{kernelName}.fits\"\n swarpedImagePath = os.path.join(dataDir, swarpedImageName)\n swarpedDecoratedImage = afwImage.DecoratedImageF(swarpedImagePath)\n swarpedImage = swarpedDecoratedImage.getImage()\n\n swarpedMetadata = swarpedDecoratedImage.getMetadata()\n warpedSkyWcs = afwGeom.makeSkyWcs(swarpedMetadata)\n\n # original image is source, warped image is destination\n srcToDest = afwGeom.makeWcsPairTransform(originalSkyWcs, warpedSkyWcs)\n\n afwWarpedMaskedImage = afwImage.MaskedImageF(swarpedImage.getDimensions())\n originalMaskedImage = originalExposure.getMaskedImage()\n\n numGoodPix = afwMath.warpImage(afwWarpedMaskedImage, originalMaskedImage,\n srcToDest, warpingControl)\n self.assertGreater(numGoodPix, 50)\n\n afwWarpedImage = afwWarpedMaskedImage.getImage()\n afwWarpedImageArr = afwWarpedImage.getArray()\n noDataMaskArr = np.isnan(afwWarpedImageArr)\n self.assertImagesAlmostEqual(afwWarpedImage, swarpedImage,\n skipMask=noDataMaskArr, rtol=rtol, atol=atol)" ]
[ "0.72670513", "0.71550035", "0.6951513", "0.6920037", "0.6804798", "0.64883035", "0.6429448", "0.63238674", "0.62219316", "0.61764634", "0.60540795", "0.60352385", "0.6020373", "0.5842893", "0.5733396", "0.5580416", "0.5545951", "0.5537896", "0.55318445", "0.54686", "0.54531264", "0.5407578", "0.5401443", "0.5336084", "0.53308374", "0.53061897", "0.5243929", "0.5243171", "0.52341866", "0.523009" ]
0.7532484
0
Test spwmap wtmode='tinttsys', interp='nearest,nearest', dowtsp=True
def testTinttsysMapNNSp(self): self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapLC(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLCSp(self):\n 
self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp", "def testTransformBasedWarp(self):\n for interpLength in (0, 1, 2, 4):\n kernelName = \"lanczos3\"\n rtol = 4e-5\n atol = 1e-2\n warpingControl = afwMath.WarpingControl(\n warpingKernelName=kernelName,\n interpLength=interpLength,\n )\n\n originalExposure = afwImage.ExposureF(originalExposurePath)\n originalMetadata = afwImage.DecoratedImageF(originalExposurePath).getMetadata()\n originalSkyWcs = afwGeom.makeSkyWcs(originalMetadata)\n\n swarpedImageName = f\"medswarp1{kernelName}.fits\"\n swarpedImagePath = os.path.join(dataDir, swarpedImageName)\n swarpedDecoratedImage = afwImage.DecoratedImageF(swarpedImagePath)\n swarpedImage = swarpedDecoratedImage.getImage()\n\n swarpedMetadata = swarpedDecoratedImage.getMetadata()\n warpedSkyWcs = afwGeom.makeSkyWcs(swarpedMetadata)\n\n # original image is source, warped image is destination\n srcToDest = afwGeom.makeWcsPairTransform(originalSkyWcs, warpedSkyWcs)\n\n afwWarpedMaskedImage = afwImage.MaskedImageF(swarpedImage.getDimensions())\n originalMaskedImage = originalExposure.getMaskedImage()\n\n numGoodPix = afwMath.warpImage(afwWarpedMaskedImage, originalMaskedImage,\n srcToDest, warpingControl)\n self.assertGreater(numGoodPix, 50)\n\n afwWarpedImage = afwWarpedMaskedImage.getImage()\n afwWarpedImageArr = afwWarpedImage.getArray()\n noDataMaskArr = np.isnan(afwWarpedImageArr)\n self.assertImagesAlmostEqual(afwWarpedImage, swarpedImage,\n skipMask=noDataMaskArr, rtol=rtol, atol=atol)", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, 
mode=\"nearest\")\n\n return sout # return convolved spectrum", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def times_map(self, value):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n if not isinstance(value, TropicalWeight):\n value = TropicalWeight(value)\n openfst.ArcMap(self.fst[0], result.fst,\n openfst.TimesStdArcMapper((<TropicalWeight> value).weight[0]))\n return result", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def test_twostep_tapering(self):\n qubit_op = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"IZ\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"ZZ\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n z2_symmetries = Z2Symmetries.find_z2_symmetries(qubit_op)\n converted_op_firststep = z2_symmetries.convert_clifford(qubit_op)\n tapered_op_secondstep = z2_symmetries.taper_clifford(converted_op_firststep)\n\n with self.subTest(\"Check first step: Clifford transformation\"):\n converted_op_expected = SparsePauliOp.from_list(\n [\n (\"II\", -1.0537076071291125),\n (\"ZX\", 0.393983679438514),\n (\"ZI\", -0.39398367943851387),\n (\"IX\", -0.01123658523318205),\n (\"XX\", 0.1812888082114961),\n ]\n )\n\n self.assertEqual(converted_op_expected, converted_op_firststep)\n\n with self.subTest(\"Check second step: Tapering\"):\n tapered_op = z2_symmetries.taper(qubit_op)\n self.assertEqual(tapered_op, tapered_op_secondstep)", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def time_windows(baz, arriv_p, arriv_s, init_sec, is_local):\n\n # TIME WINDOWS (for arrivals and subplots)\n # Window lengths dependent on event distance\n if is_local == 'non-local':\n min_pw = arriv_p\n max_pw = min_pw + (arriv_s - arriv_p) // 4\n min_sw = arriv_s - 0.001 * (arriv_s - arriv_p)\n max_sw = arriv_s + 150\n min_lwi = surf_tts(baz[0], init_sec) - 20\n t1 = (baz[0]/1000000) * 50\n # window length grows 50 sec per 1000 km.\n max_lwi = min_lwi + t1\n min_lwf = max_lwi\n t2 = (baz[0]/1000000) * 60\n # window length grows 60 sec per 1000 km.\n max_lwf = min_lwf + t2\n elif is_local == 'local':\n min_pw = arriv_p\n max_pw = min_pw + 20\n min_sw = arriv_s - 5\n max_sw = min_sw + 20\n min_lwi = surf_tts(baz[0], init_sec) + 20\n max_lwi = min_lwi + 50\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n else:\n min_pw = arriv_p\n max_pw = min_pw + 7\n min_sw = arriv_s\n max_sw = min_sw + 7\n min_lwi = surf_tts(baz[0], init_sec) + 5\n max_lwi = min_lwi + 12\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n\n return min_pw, max_pw, min_sw, max_sw, min_lwi, max_lwi, min_lwf, max_lwf", "def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):\n\n #indices of the map that fit the dmsp map\n indices = scipy.interpolate.griddata((X_map,Y_map), np.arange(len(X_map.flatten())), 
(X_dmsp,Y_dmsp), method = 'nearest')\n\n #get mask for map elements that are within distance tolerance \n mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] - Y_dmsp) < tolerance)\n\n return indices,mask", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)" ]
[ "0.7297252", "0.726566", "0.71029043", "0.7018207", "0.6625115", "0.6614049", "0.6548781", "0.64842147", "0.62953097", "0.6115125", "0.6074345", "0.6069492", "0.5952319", "0.5844141", "0.57873845", "0.56899405", "0.55286527", "0.55151045", "0.54746234", "0.54596645", "0.5424294", "0.53513545", "0.5336818", "0.52917296", "0.52854115", "0.52793914", "0.5196716", "0.51731294", "0.5169486", "0.5140522" ]
0.76174855
0
Test spwmap wtmode='tinttsys', interp='linear,linear', dowtsp=True
def testTinttsysMapLLSp(self): self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapLCSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLC(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLCSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following 
checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def testTsysLLSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,linear')", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def tsMap(self):\n mapplt = FermiMap()\n mapplt.savepath = self.workpath\n mapplt.image = self.outtsmap\n mapplt.figname = 'TSMAP.pdf'\n mapplt.cbarlabel = r'TS'\n mapplt.mapSky()\n if showSrc:\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) 
& ([not i.endswith('c') for i in srcs['Name']])]\n mapplt.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n mapplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(mapplt.savepath, mapplt.figname) ))\n return", "def testDoubleMIP(self):\n\n self.M.render(self.testoutput[2], wide=True)", "def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def testTsysLCSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,cspline')", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] 
* units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def times_map(self, value):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n if not isinstance(value, TropicalWeight):\n value = TropicalWeight(value)\n openfst.ArcMap(self.fst[0], result.fst,\n openfst.TimesStdArcMapper((<TropicalWeight> value).weight[0]))\n return result", "def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):\n #reshape to N by 2 array where each row is (X, Y)\n dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))\n map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))\n N_points = dmsp_points.shape[0]\n obs_val = Obs_map.flatten()\n model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )\n dists, inds = model.query(dmsp_points, k=k) \n\n obs_interp = np.empty(N_points)\n for i in range(N_points):\n norm = LA.norm(dists[i])\n if (norm > tol):\n obs_interp[i] = np.nan\n else:\n# weights = dists[i]/norm\n\n weights = dists[i]/np.nansum(dists[i])\n obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )\n\n return obs_interp", "def testTransformBasedWarp(self):\n for interpLength in (0, 1, 2, 4):\n kernelName = \"lanczos3\"\n rtol = 4e-5\n atol = 1e-2\n warpingControl = afwMath.WarpingControl(\n warpingKernelName=kernelName,\n interpLength=interpLength,\n )\n\n originalExposure = afwImage.ExposureF(originalExposurePath)\n originalMetadata = afwImage.DecoratedImageF(originalExposurePath).getMetadata()\n originalSkyWcs = afwGeom.makeSkyWcs(originalMetadata)\n\n swarpedImageName = f\"medswarp1{kernelName}.fits\"\n swarpedImagePath = os.path.join(dataDir, swarpedImageName)\n swarpedDecoratedImage = afwImage.DecoratedImageF(swarpedImagePath)\n swarpedImage = swarpedDecoratedImage.getImage()\n\n swarpedMetadata = swarpedDecoratedImage.getMetadata()\n warpedSkyWcs = afwGeom.makeSkyWcs(swarpedMetadata)\n\n # original image is source, warped image is destination\n srcToDest = afwGeom.makeWcsPairTransform(originalSkyWcs, warpedSkyWcs)\n\n afwWarpedMaskedImage = afwImage.MaskedImageF(swarpedImage.getDimensions())\n originalMaskedImage = originalExposure.getMaskedImage()\n\n numGoodPix = afwMath.warpImage(afwWarpedMaskedImage, originalMaskedImage,\n srcToDest, warpingControl)\n self.assertGreater(numGoodPix, 50)\n\n afwWarpedImage = afwWarpedMaskedImage.getImage()\n afwWarpedImageArr = afwWarpedImage.getArray()\n noDataMaskArr = np.isnan(afwWarpedImageArr)\n self.assertImagesAlmostEqual(afwWarpedImage, swarpedImage,\n skipMask=noDataMaskArr, rtol=rtol, atol=atol)", "def test_cubic_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are 
on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='cubic')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='cubic')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred()\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def smooth_template_disp_sedpy(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):\n from sedpy.smoothing import smoothspec\n \n wobs = templ.wave*(1+z)\n trim = (wobs > wobs_um[0]*1.e4*0.95)\n trim &= (wobs < wobs_um[-1]*1.e4*1.05)\n \n if flambda:\n fobs = templ.flux_flam(z=z)#[wclip]\n else:\n fobs = templ.flux_fnu(z=z)#[wclip]\n \n if with_igm:\n fobs *= templ.igm_absorption(z)\n \n wobs = wobs[trim]\n fobs = fobs[trim]\n \n R = np.interp(wobs, disp['WAVELENGTH']*1.e4, disp['R'],\n left=disp['R'][0], right=disp['R'][-1])*scale_disp\n \n dv = np.sqrt(velocity_fwhm**2 + (3.e5/R)**2)\n dlam_ang = wobs*dv/3.e5/2.35\n \n def _lsf(wave):\n return np.interp(wave, wobs, dlam_ang)\n \n tsmooth = smoothspec(wobs, fobs,\n smoothtype='lsf', lsf=_lsf,\n outwave=wobs_um*1.e4,\n fftsmooth=FFTSMOOTH,\n )\n \n return tsmooth" ]
[ "0.7476452", "0.7114421", "0.69962686", "0.6957557", "0.69043505", "0.6739348", "0.6736017", "0.6734121", "0.67266977", "0.6684998", "0.6584073", "0.64497477", "0.64235175", "0.63057137", "0.62779695", "0.5956183", "0.5637475", "0.5566264", "0.5442669", "0.5413177", "0.53595966", "0.5336379", "0.5270209", "0.5218097", "0.5200306", "0.5160072", "0.5152997", "0.5108336", "0.50836885", "0.5060399" ]
0.71439976
1
Test spwmap wtmode='tinttsys', interp='linear,cspline', dowtsp=True
def testTinttsysMapLCSp(self): self._runTest('tinttsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapLLSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysMapLC(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTsysMapNNSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysMapLCSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysLCSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapLC(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,13,15], 'linear,cspline',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def testTsysMapLLSp(self):\n self._runTest('tsys', True, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTinttsysLLSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTinttsysMapLL(self):\n self._runTest('tinttsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTsysNNSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def testTsysMapLL(self):\n self._runTest('tsys', False, [1,3,5,7,9,11,15], 'linear,linear',self.spwmap)", "def testTsysLCSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,cspline')", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n 
\"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)", "def testTsysLLSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,linear')", "def tablebroad(w, s, xip, yip):\n \"\"\"\n History\n -------\n 22-May-92 JAV\n Switched instrumental profile from multiple gaussians\n to gaussian with power-law wings.\n 04-Aug-92 JAV\n Renamed from ipsmo.pro# changed f/ procedure to function.\n Switched f/ 10 to 15 Hamilton pixels in each wing.\n 20-Oct-92 JAV\n Switched from gpfunc to ipfun (4 to 5 par).\n 23-Aug-94 JAV\n Switched to explicitly passed IPs.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Define sizes\n dsdh = np.abs(np.min(np.diff(xip)))\n nip = 2 * int(15 / dsdh) + 1 ## profile points\n\n # Generate instrumental profile on model pixel scale.\n x = (\n np.arange(nip, dtype=float) - (nip - 1) / 2\n ) * dsdh # offset in Hamilton pixels\n ip = interp1d(xip, yip, kind=\"cubic\")(x)\n # ip = bezier_interp(xip, yip, x) # spline onto new scale\n ip = ip[::-1] # reverse for convolution\n ip = ip / np.sum(ip) # ensure unit area\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, ip, mode=\"nearest\")\n\n return sout # return convolved spectrum", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError", "def testDoubleMIP(self):\n\n self.M.render(self.testoutput[2], wide=True)", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def setSplineMode(order=3,npts=200):\n dislin.splmod(order,npts)", "def tsMap(self):\n mapplt = FermiMap()\n mapplt.savepath = self.workpath\n mapplt.image = self.outtsmap\n mapplt.figname = 'TSMAP.pdf'\n mapplt.cbarlabel = r'TS'\n mapplt.mapSky()\n if showSrc:\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) 
& ([not i.endswith('c') for i in srcs['Name']])]\n mapplt.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n mapplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(mapplt.savepath, mapplt.figname) ))\n return", "def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def test_cubic_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='cubic')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='cubic')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred()\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n 
pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty", "def test_interpolation():\n spiral_arm = survey.get_spiral_slice(track = \"perseus\", \n interpolate = True)\n spiral_arm2 = survey.get_spiral_slice(track = \"Per\", \n interpolate = False)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def plotPSTH(self, stimpath,\n stimdata,\n spikesdict,\n simtime,\n offset=0,\n binsize=10e-3,\n legendSuffix='',\n rate=False,\n normcells=True\n ):\n if not spikesdict:\n return 0\n stimdata = stimdata[:]\n times = []\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in stimpath:\n times = stimdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in stimpath:\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[False, numpy.diff(stimdata) < 0].nonzero()[0]]\n else:\n stimdata = analyzer.smooth(stimdata)\n mid = numpy.mean(stimdata)\n stimdata = stimdata[stimdata > mid] # Threshold at midpoint\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[True, stimdata[1:] > stimdata[:-1]] & numpy.r_[stimdata[:-1] > stimdata[1:], True]]\n if (times is None) or (len(times) == 0):\n return 0\n start = times + offset\n end = numpy.zeros(times.shape)\n end[:-1] = start[1:]\n end[-1] = simtime + offset # We assume\n accumulated_data = []\n for spikedata in spikesdict.values():\n tpoints = spikedata[:]\n for ii in range(len(times)):\n ix = numpy.nonzero((tpoints >= start[ii]) & (tpoints < end[ii]))[0]\n accumulated_data = numpy.r_[accumulated_data, tpoints[ix] - times[ii]]\n if len(accumulated_data) == 0:\n return 0\n # set the bins by splitting interstimulus interval\n interval = numpy.mean(numpy.diff(times))\n bins = numpy.arange(offset, interval+offset, binsize)\n bins = numpy.r_[bins, bins[-1] + binsize]\n hist = numpy.histogram(accumulated_data, bins=bins)\n xx = (hist[1][:-1] + hist[1][1:])/2.0\n if rate:\n yy = hist[0] / binsize\n else:\n yy = hist[0]\n if normcells:\n yy /= len(spikesdict)\n path = stimpath + '_psth' + legendSuffix\n new_curve = Qwt.QwtPlotCurve(path)\n new_curve.setData(xx, yy)\n pen = Qt.QPen(Qt.Qt.blue, 1, Qt.Qt.DashDotLine)\n new_curve.setStyle(Qwt.QwtPlotCurve.Lines)\n new_curve.setPen(pen)\n pen = Qt.QPen(Qt.Qt.red, 
1)\n new_curve.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.XCross,\n Qt.QBrush(),\n pen,\n Qt.QSize(3,3))) \n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n path = stimpath + '_bins' + legendSuffix\n histmarkers = Qwt.QwtPlotCurve(path)\n height = int(max(yy) + 0.5)\n yy = numpy.ones(hist[1].shape) * height\n histmarkers.setData(hist[1], yy)\n pen = Qt.QPen(Qt.Qt.black, 1, Qt.Qt.DotLine)\n histmarkers.setPen(pen)\n histmarkers.setStyle(Qwt.QwtPlotCurve.Sticks)\n histmarkers.attach(self)\n self.curve_path_dict[histmarkers] = path\n self.path_curve_dict[path].append(new_curve)\n self.clearZoomStack()\n self.replot()\n return 1" ]
[ "0.7282748", "0.7065426", "0.7024528", "0.6905034", "0.6886287", "0.67218274", "0.6720227", "0.66895956", "0.66394633", "0.66167086", "0.6601417", "0.638492", "0.6343307", "0.6111405", "0.6106252", "0.58812624", "0.57545495", "0.55924857", "0.54666066", "0.5418006", "0.52466416", "0.5168207", "0.51645", "0.51398337", "0.5136531", "0.51160747", "0.50856984", "0.50818723", "0.50734115", "0.5052786" ]
0.74198663
0
Return a darker color.
def darken(color): hue, saturation, value = rgb_to_hsv(color.red, color.green, color.blue) value /= 1.5 saturation /= 1.25 return hsv_to_rgb(hue, saturation, value) + (color.alpha,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def darken(hex_color: str) -> str:\n amount = 0.2\n hex_color = hex_color.replace(\"#\", \"\")\n red = max(0, int(hex_color[0:2], 16) - int(255 * amount))\n green = max(0, int(hex_color[2:4], 16) - int(255 * amount))\n blue = max(0, int(hex_color[4:6], 16) - int(255 * amount))\n darker_color = (\n \"#%s\" % hex(red)[2:].zfill(2) + hex(green)[2:].zfill(2) + hex(blue)[2:].zfill(2)\n )\n return darker_color", "def darken_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])", "def dark(r, d):\n return d * 1.0 / (r + d) + d * r * 1.0 / ((r + d) ** 2)", "def dark_style(stylename):\n return dark_color(get_style_by_name(stylename).background_color)", "def driftColor(baseColor, factor=110):\n if baseColor.lightness() > 128:\n return baseColor.darker(factor)\n else:\n return baseColor.lighter(factor+10)", "def contrast_from_bg(cls, col=\"#000000\", dark_default=\"000000\", light_default=\"FFFFFF\", hashed=\"#\"):\n trigger = float(0.45) #Values greater than this result in black text\n if not col:\n return \"#000000\" #Default to black\n if col in (\"Transparent\",\"transparent\"):\n return \"#000000\" #Default to black\n if not hashed:\n hashed = \"\"\n elif hashed is True:\n hashed = \"#\"\n try:\n col_out = cls.colour_to_rgb_tuple(col)\n r,g,b = col_out\n div = 255.0 #Produces a value between 0-1 as a float\n lum = float(0.2126*pow(r/div, 2.2)) + float(0.7152*pow(g/div, 2.2)) + float(0.0722*pow(b/div, 2.2))\n except (TypeError, ValueError):\n return dark_default\n #logging.info (\"Luminosity: %s\" % lum)\n #Decision gate:\n if lum >= trigger: #Light background, need dark text\n return \"%s%s\" % (hashed, dark_default)\n else: #Dark background, need light text\n return \"%s%s\" % (hashed, light_default)", "def _get_color_brightness(self, color):\n d0, _, _ = self._get_color_dominance_indices(color)\n return color[d0]/MAX", "def dark_color(color):\n rgb = hex_to_rgb(color)\n if rgb:\n return rgb_to_hls(*rgb)[1] < 128\n else: # default to False\n return False", "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n color = _color_to_rgb(color, input)\n gray = \"#222222\"\n colors = [color, gray] if reverse else [gray, color]\n return blend_palette(colors, n_colors, as_cmap)", "def get_dark_squares_color(self) -> ColorTuple:\n return self._dark_squares_color", "def darker(image):\r\n # Demonstrate looping over all the pixels of an image,\r\n # changing each pixel to be half its original intensity.\r\n for pixel in image:\r\n pixel.red = pixel.red // 2\r\n pixel.green = pixel.green // 2\r\n pixel.blue = pixel.blue // 2", "def get_foreground(self):\n\n h = ((self._bytes[12] & 0x0F) << 8) | self._bytes[13]\n s = self._bytes[14]\n l = self._bytes[15]\n\n h = utils.map(h, 0, 4095, 0, 360)\n s = 65 - utils.map(s, 0, 255, 0, 20)\n l = 75 - utils.map(l, 0, 255, 0, 20)\n\n return utils.hsl_to_rgb(h, s, l)", "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "def getFgColor(self):\n return self.fgColor", "def _get_random_color(predicted_class: str) -> str:\n colors = list(ImageColor.colormap.values())\n color = colors[\n int(hashlib.sha256(predicted_class.encode(\"utf-8\")).hexdigest(), 16)\n % len(colors)\n ]\n return darken(color)", "def ensureBrightOrDark( nColor, bBright = True ):\n #~ print( \"ensureBrightOrDark: nColor: 0x%x, bBright: %s\" % (nColor, bBright) 
); \n rB = ( ( nColor & 0xFF ) );\n rG = ( ( nColor & 0xFF00 ) >> 8 );\n rR = ( ( nColor & 0xFF0000 ) >> 16 );\n \n #~ print( \"ensureBrightOrDark: comp: r=%s, g=%s, b=%s\" % (rR, rG, rB) );\n\n nMed = 0x7F+0xFF;\n if( bBright ):\n if( rB + rG + rR < nMed ):\n nColor = interpolateColor( nColor, 0xFFFFFF, 0.4 );\n else:\n if( rB + rG + rR >= nMed ):\n nColor = interpolateColor( nColor, 0x000000, 0.4 );\n #~ print( \"ensureBrightOrDark: => nColor: 0x%x\" % (nColor) );\n return nColor;", "def contrast(color):\n\n R, G, B, A = rgba(color)\n\n # See http://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/\n luminosity = 0.21 * R + 0.71 * G + 0.07 * B\n\n return '#000' if luminosity > .5 else '#fff'", "def _get_color(self):\n base_bg = int(self.STYLE_DEFAULTS[\"bg\"].lstrip(\"#\")) # dark grey\n if self.link_missing:\n base_bg += 100\n if self._active:\n base_bg += 123\n if self.row_index % 2:\n base_bg += 111\n return f\"#{base_bg}\"", "def get_color(self):\n return \"yellow\"", "def color(self):\n return rgba(self.value_of_css_property('color'))", "def reverse(color):\n return 255 - color", "def LightContrastColour(c):\r\n\r\n amount = 120\r\n\r\n # if the colour is especially dark, then\r\n # make the contrast even lighter\r\n if c.Red() < 128 and c.Green() < 128 and c.Blue() < 128:\r\n amount = 160\r\n\r\n return StepColour(c, amount)", "def get_max_brightness(self) -> float:\n return max(self._color)", "def dark_mode(app):\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(30, 30, 30))\n palette.setColor(QPalette.WindowText, QColor(225, 225, 225))\n palette.setColor(QPalette.Light, Qt.white)\n palette.setColor(QPalette.Midlight, QColor(225, 225, 225))\n palette.setColor(QPalette.Dark, QColor(65, 65, 65))\n palette.setColor(QPalette.Mid, QColor(160, 160, 160))\n palette.setColor(QPalette.BrightText, QColor(255, 51, 51))\n palette.setColor(QPalette.Button, QColor(40, 40, 40))\n palette.setColor(QPalette.Base, QColor(65, 65, 65))\n palette.setColor(QPalette.AlternateBase, QColor(50, 50, 50))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, QColor(225, 225, 225))\n palette.setColor(QPalette.ButtonText, QColor(225, 225, 225))\n palette.setColor(QPalette.Link, QColor(42, 130, 218))\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n palette.setColor(QPalette.HighlightedText, Qt.black)\n app.setPalette(palette)\n return app", "def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"", "def get_color(self):\n return self.color", "def relief_colors(color_or_ink: ColorOrInk = (0, 0, 0), darken_factors: ReliefBrightness = (0.6, 0.3)) -> ReliefColors:\n if len(color_or_ink) > 3 and not color_or_ink[3]:\n return ()\n max_col_part = max(color_or_ink[:3])\n if max_col_part == 0: # prevent zero division if color_or_ink is black/default\n lightened_color = (1.0, 1.0, 1.0)\n else:\n brighten_factor = 1 / max_col_part\n lightened_color = tuple([(col * brighten_factor) for col in color_or_ink[:3]])\n return tuple([tuple([col_part * darken for col_part in lightened_color]) for darken in darken_factors])", "def desaturated_randcol(saturation,brightness=100):\n # begin with greyscale\n brightness = clamp(brightness,40,255)\n sat = clamp(saturation,0,255-brightness)\n col = [brightness,brightness,brightness]\n \n for i in range(3):\n r = randint(-sat,sat)\n if abs(r-brightness) < sat//2: # minimum saturation = sat / 2\n if r < 0:\n r = 
randint(-sat,-sat//2)\n else:\n r = randint(sat//2,sat)\n col[i] += r\n col[i] = clamp(col[i],50,253)\n\n return gfx.Color(col)", "def get_color(self):\n\n return self.color", "def hs_color(self):\n return self._hs_color" ]
[ "0.7454738", "0.7330892", "0.69382864", "0.68706894", "0.6669197", "0.6634606", "0.64649427", "0.6458235", "0.64072514", "0.6381072", "0.6364707", "0.6326756", "0.6304574", "0.6232114", "0.6199844", "0.6146135", "0.61369884", "0.6134376", "0.6109642", "0.6106515", "0.6088504", "0.60746795", "0.6067246", "0.60615265", "0.60412323", "0.6019023", "0.5964868", "0.5949931", "0.5921267", "0.5901863" ]
0.7346637
1
Draw the given PageBox.
def draw_page(page, stream): bleed = { side: page.style[f'bleed_{side}'].value for side in ('top', 'right', 'bottom', 'left')} marks = page.style['marks'] stacking_context = StackingContext.from_page(page) draw_background( stream, stacking_context.box.background, clip_box=False, bleed=bleed, marks=marks) draw_background(stream, page.canvas_background, clip_box=False) draw_border(stream, page) draw_stacking_context(stream, stacking_context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame", "def drawPages(self, pageSelection=None):\n doc = self.parent\n\n w, h, _ = doc.getMaxPageSizes(pageSelection)\n w2 = 2*w # Make spread width\n for pn, pages in doc.getSortedPages():\n #if pageSelection is not None and not page.y in pageSelection:\n # continue\n # Create a new DrawBot viewport page to draw template + page, if not already done.\n # In case the document is oversized, then make all pages the size of the document, so the\n # pages can draw their crop-marks. Otherwise make DrawBot pages of the size of each page.\n # Size depends on the size of the larges pages + optional decument padding.\n page = pages[0] # TODO: Make it work if there as multiple pages on the same page number.\n pw, ph = w, h # Copy from main (w, h), since they may be altered.\n if self.pl > self.MIN_PADDING and self.pt > self.MIN_PADDING and self.pb > self.MIN_PADDING and self.pr > self.MIN_PADDING:\n pw += self.pl + self.pr\n ph += self.pt + self.pb\n if self.originTop:\n origin = self.pl, self.pt, 0\n else:\n origin = self.pl, self.pb, 0\n else:\n pw = page.w # No padding defined, follow the size of the page.\n ph = page.h\n origin = (0, 0, 0)\n pw2 = 2*pw\n\n if (pn % 2 == 0): # Is even?\n newPage(pw2, ph) # Make page in DrawBot of self size, actual page may be smaller if showing cropmarks.\n # View may have defined a background\n if self.style.get('fill') is not None:\n setFillColor(self.style['fill'])\n rect(0, 0, pw2, ph)\n else: # Odd, shift origin to right\n origin = origin[0]+pw, origin[1], origin[2]\n\n if self.drawBefore is not None: # Call if defined\n self.drawBefore(page, origin, self)\n\n self.drawPageFrame(page, origin)\n\n # Use the (docW, docH) as offset, in case cropmarks need to be displayed.\n page.draw(origin, self)\n\n if self.drawAfter is not None: # Call if defined\n self.drawAfter(page, origin, self)\n\n # Self.infoElements now may have collected elements needed info to be drawn, after all drawing is done.\n # So the info boxes don't get covered by regular page content.\n for e in self.elementsNeedingInfo.values():\n self._drawElementsNeedingInfo()", "def drawPageFrame(self, page, origin):\n if self.showPageFrame: # Different from base View, no check on padding.\n fill(None)\n stroke(0.5)\n strokeWidth(0.5)\n rect(origin[0], origin[1], page.w, page.h)\n #page.drawFrame(origin, self)", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def draw_box(\n draw,\n box,\n img_width,\n img_height,\n text=\"\",\n color=(255, 255, 0),\n) -> None:\n\n line_width = 3\n font_height = 8\n y_min, x_min, y_max, x_max = box\n (left, right, top, bottom) = (\n x_min * img_width,\n x_max * img_width,\n y_min * img_height,\n y_max * 
img_height,\n )\n draw.line(\n [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],\n width=line_width,\n fill=color,\n )\n if text:\n draw.text(\n (left + line_width, abs(top - line_width - font_height)), text, fill=color\n )", "def draw_rectangle(self, can_page, x_start, y_start, width_rect, height_rect, line_width, stroke_color, fill_color,\n dash_style, stroke_mode, fill_mode, text_color):\n # ログ\n log.debug(self)\n try:\n can_page.setLineWidth(line_width)\n can_page.setStrokeColor(stroke_color)\n if fill_color is None:\n fill_mode = 0\n else:\n can_page.setFillColor(fill_color)\n can_page.setDash(dash_style)\n can_page.rect(x_start, y_start, width_rect, height_rect, stroke=stroke_mode, fill=fill_mode)\n can_page.setFillColor(text_color, alpha=None)\n except:\n # 例外処理\n # log.error(traceback.format_exc())\n constant.get_error(constant.ERROR_003)", "def render(self, context):\n pygame.draw.rect(context, (255, 0, 0), self.box)", "def draw_check_boxes(self,\n boxes=1,\n completeline=0,\n lines=0,\n seek=0,\n continuetext=0,\n fontsize=15,\n gray=0,\n style=\"\",\n ):\n\n if not continuetext and not self.pagebegin:\n self.resetx()\n self.nextline()\n self.pagebegin = 0\n self.fontsize = fontsize\n c = self.canvas\n c.setLineWidth(0.90)\n c.setStrokeGray(gray)\n if style == \"center\":\n self.x = self.width / 2\n elif style == \"right\":\n self.x = self.width - self.marginsides - self.fontsize\n if seek > (self.width - (self.marginsides + self.fontsize)):\n seek = 0\n if (self.y - self.fontsize) < 40:\n self.set_new_page()\n #if continuetext == 1:\n # self.y = self.y + self.fontsize\n # self.x = self.lastx\n #else:\n # self.x = self.marginsides\n if seek != 0:\n self.x = self.x + seek\n if fontsize == 0:\n fontsize = self.fontsize\n else:\n self.fontsize = fontsize\n if completeline == 1:\n boxes = int(self.width / self.fontsize)\n for i in range(boxes):\n c.rect(self.x, self.y, self.fontsize, self.fontsize)\n self.x = self.x + self.fontsize\n if self.x > (self.width - (self.marginsides + self.fontsize)):\n break\n self.lastx = self.x\n #self.x = self.marginsides\n #self.y = self.y - self.fontsize\n #if isdate:\n # t = c.beginText(self.x, self.y)\n # t.setFont(Helvetica, 13)\n # t.setFillGray(0)\n # t.textOut(\" D D M M Y Y Y Y\")\n # c.drawText(t)\n # self.y = self.y - fontsize\n # self.lastx = t.getX()\n # self.lasty = self.y\n #if isdatetime:\n # t = c.beginText(self.x, self.y)\n # t.setFont(Helvetica, 12.5)\n # t.setFillGray(0.4)\n # t.textOut(\" D D M M Y Y Y Y -H H :M M\")\n # c.drawText(t)\n # self.y = self.y - fontsize\n # self.lastx = t.getX()\n # self.lasty = self.y\n self.lastx = self.x", "def draw(self):\n print(\"Drawing...\", end=' ')\n s = self.pixelsPerCell\n for h in range(self.height):\n for w in range(self.width):\n self.box[w][h] = self.canvas.create_rectangle(w*s, h*s, w*s+s, h*s+s,\n fill = \"gray\", outline = \"gray\")\n self.canvas.update()\n print(\"Done!\")", "def format_page(pdf, cfg, page_mapping):\n\n # pick a standard indent that almost every chunk will fit (except for intros and probably verse 10 and greater)\n STANDARD_LABEL_INDENT_LENGTH = myStringWidth('8) ', cfg.FONT_FACE, cfg.SONGLINE_SIZE)\n\n # REMEMBER: we are in the 1st Quadrant (like Math) ... 
lower left is (0,0)\n y = 0\n\n outline_level = 0\n\n # set clip region\n pdf.saveState() # so we can restore to no clip after this page\n\n if cfg.DEBUG_MARGINS:\n pdf.rect(page_mapping.startx, page_mapping.starty,\n page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty)\n\n # make a bounding box to keep from printing out of bounds\n p = pdf.beginPath()\n p.rect(page_mapping.startx, page_mapping.starty,\n page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty)\n pdf.clipPath(p, stroke=0)\n\n # draw page items\n for item in page_mapping.page:\n if isinstance(item, Songbook):\n # add to outline\n key = str(hash(('SONGBOOK ' + item.title)))\n pdf.bookmarkPage(key, left=page_mapping.startx, top=page_mapping.starty-y)\n outline_level = 0\n pdf.addOutlineEntry(item.title, key, level=outline_level)\n outline_level = 1\n\n # SONGBOOK TITLE\n if not cfg.HIDE_BOOKTITLE:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.BOOKTITLE_SIZE, y_offset=y,\n line_space=cfg.BOOKTITLE_SPACE, page_mapping=page_mapping, line=item.title)\n # SONG\n elif isinstance(item, Song):\n # add to outline\n key = str(hash('SONG(%d): %s' % (item.num, item.title)))\n pdf.bookmarkPage(key, left=page_mapping.startx, top=page_mapping.starty-y)\n pdf.addOutlineEntry(item.title, key, level=outline_level)\n #XXX: here we could add stuff to make index entries linkable\n\n # SONG TITLE\n for i, title_line in enumerate(item.title_wrapped):\n if i == 0: # first line\n indent = 0\n else:\n indent = item.num_width\n\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGTITLE_SIZE, y_offset=y,\n x_offset=indent, line_space=cfg.SONGTITLE_SPACE, page_mapping=page_mapping, line=title_line)\n\n # small_text after title\n for sm_line in item.small_text:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SMALL_SIZE, y_offset=y,\n line_space=cfg.SMALL_SPACE, page_mapping=page_mapping, line=sm_line)\n\n # introduction if applicable -- not shown when chords are not shown\n if item.introduction and cfg.DISPLAY_CHORDS:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGCHORD_SIZE, y_offset=y,\n line_space=cfg.SONGCHORD_SPACE, page_mapping=page_mapping, line=item.introduction)\n\n # VERSE OR CHORUS\n elif isinstance(item, Chunk):\n y += cfg.SONGCHUNK_B4\n\n # calulate prefix text for the chunk\n if item.type == 'chorus':\n label = 'Chorus:'\n elif item.type == 'verse':\n label = '%d)' % item.num\n elif item.type == 'bridge':\n label = 'Bridge:'\n elif item.type == 'pre-chorus':\n label = 'Pre-Chorus:'\n elif item.type == 'final chorus':\n label = 'Final Chorus:'\n elif item.type == 'ending':\n label = 'Ending:'\n elif item.type == 'introduction':\n label = 'Introduction:'\n else:\n label = ''\n\n\n if item.type in VARIABLE_INDENT: # these chunks are indented by num of chars in label\n label_length = max(myStringWidth(label+' ', cfg.FONT_FACE, cfg.SONGLINE_SIZE), STANDARD_LABEL_INDENT_LENGTH)\n # type indented no label gets an extra indent\n if item.type == INDENT_NO_LABEL:\n label_length *= 2\n else: # everything else gets a standard indent\n label_length = STANDARD_LABEL_INDENT_LENGTH\n\n # print the chunk lines\n if item.type == 'introduction' and not cfg.DISPLAY_CHORDS: # introduction is not shown when chords are not shown\n pass\n else:\n for count, line in enumerate(item.lines):\n if count == 0: # on the first line in the chunk write the label: chorus, 1), 2), 3) ...\n if cfg.DISPLAY_CHORDS and item.has_chords() and item.type == 'verse': #for verses with 
chords, we move the label down \n new_y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGLINE_SIZE, y_offset=y+cfg.SONGCHORD_SIZE+cfg.SONGCHORD_SPACE, x_offset=0, line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=label)\n else: \n new_y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.SONGLINE_SIZE, y_offset=y, x_offset=0,\n line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=label)\n if item.type not in VARIABLE_INDENT: # standard indent, with chunk body on next line\n y = new_y # so we update y ... in other cases y not updated, so same line used\n #else: ignore new_y and we print on same line below\n\n\n # shrink font size, or wrap the line if that lets us fit\n # if resize != 0 we are shrinking, else we wrap\n font_size = cfg.SONGLINE_SIZE\n if cfg.RESIZE_PERCENT == 0:\n # font size does not change. \n font_size = font_size \n \n else:\n # reduce font size as much as needed but don't pass x% original\n while (label_length + myStringWidth(line.text, cfg.FONT_FACE, font_size)) > (page_mapping.endx - page_mapping.startx) and font_size > cfg.SONGLINE_SIZE * cfg.RESIZE_PERCENT:\n font_size = font_size * 0.99 # reduce 1%\n #print 'reducing from', cfg.SONGLINE_SIZE, 'to', font_size, '%2.2f%%' % (font_size / cfg.SONGLINE_SIZE)\n \n # we have a font -- lets use it\n #DBG:sav_y = y\n if cfg.DISPLAY_CHORDS and item.has_chords():\n y = print_chords(pdf, cfg, font_size=font_size, y_offset=y, x_offset=label_length, page_mapping=page_mapping, line=line)\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=font_size, y_offset=y, x_offset=label_length,\n line_space=cfg.SONGLINE_SPACE, page_mapping=page_mapping, line=line.text)\n #DBG:pdf.setStrokeColor('green')\n #DBG:pdf.rect(page_mapping.startx+label_length, page_mapping.starty-(sav_y),\n #DBG: pdf.stringWidth(line.text, cfg.FONT_FACE, font_size), -line.height)\n #DBG:pdf.setStrokeColor('red')\n #DBG:pdf.rect(page_mapping.startx+label_length, page_mapping.starty-(sav_y),\n #DBG: pdf.stringWidth(line.text, cfg.FONT_FACE, font_size), sav_y-y)\n #DBG:# reset\n #DBG:pdf.setStrokeColor('black')\n #DBG:pdf.setFillColor('black')\n\n if item.last_chunk:\n y += cfg.SONGCHUNK_B4\n for line in item.copyright_footer:\n y = print_line(pdf, font_face=cfg.FONT_FACE, font_size=cfg.COPYRIGHT_SIZE, y_offset=y,\n line_space=0, page_mapping=page_mapping, line=line)\n y += cfg.COPYRIGHT_SPACE # COPYRIGHT SPACE is padding between copyright lines \n\n # any parting space\n y += item.height_after\n\n #DBG:pdf.rect(page_mapping.startx+5, page_mapping.starty - (starty+cfg.SONGLINE_SIZE), 20, starty-y)\n # INDEX\n elif isinstance(item, Index) and cfg.DISPLAY_INDEX != INDEX_OFF: # top-level index which contains index entries\n if cfg.DISPLAY_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # only add space when index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Alphabetical Index\")\n\n # SCRIP INDEX\n elif isinstance(item, ScripIndex) and cfg.DISPLAY_SCRIP_INDEX != INDEX_OFF: # top-level scrip_index which contains index entries\n if cfg.DISPLAY_SCRIP_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # only add space when scrip index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Scripture Index\")\n\n # CAT INDEX\n elif 
isinstance(item, CatIndex) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF: # top-level cat_index which contains index entries\n if cfg.DISPLAY_CAT_INDEX == INDEX_NO_PAGE_BREAK:\n y += cfg.INDEX_TITLE_B4 # adding space because cat_index not starting on a new page\n y = print_line(pdf, font_face=cfg.INDEX_TITLE_FONT, font_size=cfg.INDEX_TITLE_SIZE, y_offset=y, \n line_space=cfg.INDEX_TITLE_SPACE, page_mapping=page_mapping, line=\"Category Index\")\n\n # CAT INDEX Category\n elif isinstance(item, Category) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF: # Category inside cat_index\n y += cfg.INDEX_CAT_B4 # add space before the category\n y = print_line(pdf, font_face=cfg.INDEX_CAT_FONT, font_size=cfg.INDEX_CAT_SIZE, y_offset=y, \n line_space=cfg.INDEX_CAT_SPACE, page_mapping=page_mapping, line=item.category)\n\n # CAT INDEX ITEM\n elif isinstance(item, CatIndexEntry) and cfg.DISPLAY_CAT_INDEX != INDEX_OFF:\n # print only the song number at this time -- don't save y since we are going to print on the line again\n print_line(pdf, font_face=cfg.INDEX_SONG_FONT, font_size=cfg.INDEX_SONG_SIZE, y_offset=y, line_space=cfg.INDEX_SONG_SPACE,\n page_mapping=page_mapping, line=str(item.song.num))\n # now print the index text with a consistent x offset so everything lines up\n y = print_line(pdf, font_face=cfg.INDEX_SONG_FONT, font_size=cfg.INDEX_SONG_SIZE, y_offset=y, line_space=cfg.INDEX_SONG_SPACE,\n x_offset=max(cfg.INDEX_SONG_SIZE, cfg.INDEX_FIRST_LINE_SIZE)*2, page_mapping=page_mapping, line=item.index_text)\n\n # INDEX ITEMS (after CatIndexEntry because CatIndexEntry is a subclass of IndexEntry)\n elif isinstance(item, IndexEntry) and (cfg.DISPLAY_INDEX != INDEX_OFF or cfg.DISPLAY_SCRIP_INDEX != INDEX_OFF):\n if item.is_song_title:\n LINE_SIZE = cfg.INDEX_SONG_SIZE\n LINE_SPACE= cfg.INDEX_SONG_SPACE\n FONT = cfg.INDEX_SONG_FONT\n else:\n LINE_SIZE = cfg.INDEX_FIRST_LINE_SIZE\n LINE_SPACE= cfg.INDEX_FIRST_LINE_SPACE\n FONT = cfg.INDEX_FIRST_LINE_FONT\n\n # print only the song number at this time -- don't save y since we are going to print on the line again\n print_line(pdf, font_face=FONT, font_size=LINE_SIZE, y_offset=y, line_space=LINE_SPACE,\n page_mapping=page_mapping, line=str(item.song.num))\n # now print the index text with a consistent x offset so everything lines up\n y = print_line(pdf, font_face=FONT, font_size=LINE_SIZE, y_offset=y, line_space=LINE_SPACE,\n x_offset=max(cfg.INDEX_SONG_SIZE, cfg.INDEX_FIRST_LINE_SIZE)*2, page_mapping=page_mapping, line=item.index_text)\n \n # restore original clip settings\n pdf.restoreState()\n\n # debug -- print page (small page here) rect\n #DBG:print '%d x %d rect at (%d, %d)' % (page_mapping.endx-page_mapping.startx, page_mapping.endy-page_mapping.starty,\n #DBG: page_mapping.startx, page_mapping.starty)\n #XXX: uncomment last 2 lines to have a border around each page\n #pdf.rect(page_mapping.startx, page_mapping.starty,\n # page_mapping.endx-page_mapping.startx,page_mapping.endy-page_mapping.starty,\n # fill=0)\n if page_height(page_mapping.page) != y:\n print 'Page:', pdf.getPageNumber(), 'Expected page height:', page_height(page_mapping.page), 'not equal to actual page height:', y\n #DBG:pdf.rect(page_mapping.startx, page_mapping.starty,\n #DBG: page_mapping.endx-page_mapping.startx,-page_height(page_mapping.page),\n #DBG: fill=0)", "def draw(self, page, painter, key, tile, paperColor=None):\n # determine the part to draw; convert tile to viewbox\n viewbox = self.map(key, page._viewBox).mapRect(QRectF(*tile))\n target = QRectF(0, 0, tile.w, tile.h)\n if 
key.rotation & 1:\n target.setSize(target.size().transposed())\n with locking.lock(page._svg):\n page._svg.setViewBox(viewbox)\n # we must specify the target otherwise QSvgRenderer scales to the\n # unrotated image\n painter.save()\n painter.setClipRect(target, Qt.IntersectClip)\n # QSvgRenderer seems to set antialiasing always on anyway... :-)\n self.setRenderHints(painter)\n page._svg.render(painter, target)\n painter.restore()\n page._svg.setViewBox(page._viewBox)", "def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)", "def box(self, box, padx=0.5, pady=0.3, **options):\n\n # underride sets default values only if the called hasn't\n underride(options, outline='black')\n box.left -= padx\n box.top -= pady\n box.right += padx\n box.bottom += pady\n item = self.rectangle(box, **options)\n return item", "def draw_box(self, xy=None, bbox=None, flatratio=1.0, **options):\n options = self._check_options(options)\n args = []\n \n if options[\"outlinecolor\"]:\n pen = aggdraw.Pen(options[\"outlinecolor\"], options[\"outlinewidth\"])\n args.append(pen)\n if options[\"fillcolor\"]:\n brush = aggdraw.Brush(options[\"fillcolor\"])\n args.append(brush)\n \n if xy:\n x,y = xy\n width = options[\"fillwidth\"]\n height = options[\"fillheight\"]\n if flatratio: height *= flatratio\n width, height = width / self.width * self.coordspace_width, \\\n height / self.height * self.coordspace_height\n halfwidth, halfheight = width / 2.0, height / 2.0\n bbox = [x-halfwidth, y-halfheight, x+halfwidth, y+halfheight]\n \n elif bbox: pass\n \n else: raise Exception(\"Either xy or bbox has to be specified\")\n \n self.drawer.rectangle(bbox, *args)", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)", "def draw(self, window):\n\n if(self.page == Page.Game):\n self.draw_game_page(window);\n elif(self.page == Page.Save):\n self.draw_save_page(window);\n\n pygame.display.flip();", "def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_bbox(n):\n return drawBbox(named(n))", "def draw_partition(self, box, context, bounding_box):\n assert self.canvas\n\n cr = context.cairo\n cr.set_line_width(box.style(\"line-width\"))\n\n if self.subject and not self.subject.isDimension and self._toplevel:\n cr.move_to(0, 0)\n cr.line_to(bounding_box.width, 0)\n\n h = self._header_size[1]\n\n # draw outside lines if this item is toplevel partition\n if self._toplevel:\n cr.move_to(0, bounding_box.height)\n cr.line_to(0, h)\n cr.line_to(bounding_box.width, h)\n cr.line_to(bounding_box.width, bounding_box.height)\n\n if self._subpart:\n # header line for all subparitions\n hd = 
h + self._hdmax\n cr.move_to(0, hd)\n cr.line_to(bounding_box.width, hd)\n\n if self._subpart:\n # draw inside lines for all children but last one\n dp = 0\n for sl in self.canvas.get_children(self)[:-1]:\n dp += sl.width\n cr.move_to(dp, h)\n cr.line_to(dp, bounding_box.height)\n\n cr.stroke()\n\n if context.hovered or context.dropzone:\n cr.save()\n cr.set_dash((1.0, 5.0), 0)\n cr.set_line_width(1.0)\n cr.rectangle(0, 0, bounding_box.width, bounding_box.height)\n draw_highlight(context)\n cr.stroke()\n cr.restore()", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def zoombox(self, parameter):\n self.navigation_rectangle = parameter\n self.paint_manager.show_navigation_rectangle(parameter)", "def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)", "def draw(self, *args, **kwargs):\n\n for stack in self:\n for component in stack:\n for i, bin in enumerate(self.binning):\n bin_content = component.th1f.GetBinContent(i+1)\n bin_error = component.th1f.GetBinError(i+1)\n component.distribution.add_bin(bin[0], bin[1], bin_content, bin_error)\n component.distribution.calculate_line()\n self.plotbox.add(component.distribution)\n\n self.plotbox.calculate_plotting_transform()\n\n self.canvas.draw(*args, **kwargs)", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def draw(self, thing):\n thing.draw(self, Point([2,2]), flip=1)\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def draw_circle(self,\n boxes=1,\n completeline=0,\n lines=0,\n seek=0,\n continuetext=0,\n fontsize=0,\n gray=0,\n style=\"\"):\n\n c = self.canvas\n c.setLineWidth(0.90)\n c.setStrokeGray(gray)\n self.resetx(seek=seek)\n #if style == \"center\":\n # self.x = self.width / 2\n #elif style == \"right\":\n # self.x = self.width - self.marginsides - self.fontsize\n #if seek > (self.width - (self.marginsides + self.fontsize)):\n # seek = 0\n #if (self.y - self.fontsize) < 40:\n # self.set_new_page()\n #if continuetext == 1:\n # self.y = self.y + self.fontsize\n # self.x = self.lastx\n #else:\n # self.x = self.marginsides\n #if seek != 0:\n # self.x = self.x + seek\n #if fontsize == 0:\n # fontsize = self.fontsize\n #else:\n # self.fontsize = fontsize\n #if completeline == 1:\n # boxes = int(self.width / self.fontsize)\n for eachcircle in xrange(boxes):\n c.circle(self.x + self.fontsize/2, self.y + self.fontsize/2,\n self.fontsize/2, fill = 0)\n self.resetx(seek=self.fontsize)\n self.resetx(seek=seek)\n # if self.x > (self.width - (self.marginsides + self.fontsize)):\n # break\n #self.lastx = self.x\n #self.x = self.marginsides\n #self.y = self.y - self.fontsize", "def draw_box(image, curr_box, label, draw_line=False):\n # y1, x1, y2, x2 = box\n # print(curr_box)\n # assert False\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n _, h, w = image.size()\n x1 = int(x1.item() * w)\n y1 = int(y1.item() * h)\n x2 = int(x2.item() * w)\n y2 = int(y2.item() * h)\n if draw_line:\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n image[:, y1:y1 + 3, x1:x2] = 
label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n else:\n image[:, y1:y1 + 3, x1:x2] = label/13.0\n image[:, y2:y2 + 3, x1:x2] = label/13.0\n image[:, y1:y2, x1:x1 + 3] = label/13.0\n image[:, y1:y2, x2:x2 + 3] = label/13.0\n return image", "def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * 
sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def __drawAndErase(self, boxToDraw, boxToErase=None):\n dc = wx.ClientDC(self.drawingSurface)\n dc.BeginDrawing()\n dc.SetPen(wx.Pen(wx.WHITE, 1, wx.DOT))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetLogicalFunction(wx.XOR)\n if boxToErase:\n r = wx.Rect(*boxToErase)\n dc.DrawRectangleRect(r)\n\n r = wx.Rect(*boxToDraw)\n dc.DrawRectangleRect(r)\n dc.EndDrawing()" ]
[ "0.6347439", "0.63203573", "0.62022114", "0.6174339", "0.6136849", "0.6121753", "0.60829866", "0.6076081", "0.594833", "0.5891543", "0.58463913", "0.58332574", "0.574836", "0.5743358", "0.57408214", "0.57265896", "0.57255423", "0.5719865", "0.56654084", "0.5649143", "0.56281745", "0.55723876", "0.555682", "0.55516547", "0.55502504", "0.55450207", "0.55398667", "0.553772", "0.55307364", "0.55307174" ]
0.69499445
0
Draw a ``stacking_context`` on ``stream``.
def draw_stacking_context(stream, stacking_context): # See https://www.w3.org/TR/CSS2/zindex.html with stacked(stream): box = stacking_context.box stream.begin_marked_content(box, mcid=True) # apply the viewport_overflow to the html box, see #35 if box.is_for_root_element and ( stacking_context.page.style['overflow'] != 'visible'): rounded_box_path( stream, stacking_context.page.rounded_padding_box()) stream.clip() stream.end() if box.is_absolutely_positioned() and box.style['clip']: top, right, bottom, left = box.style['clip'] if top == 'auto': top = 0 if right == 'auto': right = 0 if bottom == 'auto': bottom = box.border_height() if left == 'auto': left = box.border_width() stream.rectangle( box.border_box_x() + right, box.border_box_y() + top, left - right, bottom - top) stream.clip() stream.end() if box.style['opacity'] < 1: original_stream = stream stream = stream.add_group(*stream.page_rectangle) if box.transformation_matrix: if box.transformation_matrix.determinant: stream.transform(*box.transformation_matrix.values) else: stream.end_marked_content() return # Point 1 is done in draw_page # Point 2 if isinstance(box, (boxes.BlockBox, boxes.MarginBox, boxes.InlineBlockBox, boxes.TableCellBox, boxes.FlexContainerBox)): # The canvas background was removed by layout_backgrounds draw_box_background_and_border(stream, stacking_context.page, box) with stacked(stream): # dont clip the PageBox, see #35 if box.style['overflow'] != 'visible' and not isinstance( box, boxes.PageBox): # Only clip the content and the children: # - the background is already clipped # - the border must *not* be clipped rounded_box_path(stream, box.rounded_padding_box()) stream.clip() stream.end() # Point 3 for child_context in stacking_context.negative_z_contexts: draw_stacking_context(stream, child_context) # Point 4 for block in stacking_context.block_level_boxes: draw_box_background_and_border( stream, stacking_context.page, block) # Point 5 for child_context in stacking_context.float_contexts: draw_stacking_context(stream, child_context) # Point 6 if isinstance(box, boxes.InlineBox): draw_inline_level(stream, stacking_context.page, box) # Point 7 for block in [box] + stacking_context.blocks_and_cells: if isinstance(block, boxes.ReplacedBox): draw_border(stream, block) draw_replacedbox(stream, block) elif block.children: if block != box: stream.begin_marked_content(block, mcid=True) if isinstance(block.children[-1], boxes.LineBox): for child in block.children: draw_inline_level( stream, stacking_context.page, child) if block != box: stream.end_marked_content() # Point 8 for child_context in stacking_context.zero_z_contexts: draw_stacking_context(stream, child_context) # Point 9 for child_context in stacking_context.positive_z_contexts: draw_stacking_context(stream, child_context) # Point 10 draw_outlines(stream, box) if box.style['opacity'] < 1: group_id = stream.id stream = original_stream stream.push_state() stream.set_alpha(box.style['opacity'], stroke=True, fill=True) stream.draw_x_object(group_id) stream.pop_state() stream.end_marked_content()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stacked(stream):\n stream.push_state()\n try:\n yield\n finally:\n stream.pop_state()", "def stack(self, *args, **kwargs):\n return self._block(*args, container=\"stack\", **kwargs)", "def build_stream(\n self,\n tag,\n manifest,\n synthetic_image_id,\n layer_iterator,\n tar_stream_getter_iterator,\n reporter=None,\n ):\n return GzipWrap(\n self.stream_generator(\n tag,\n manifest,\n synthetic_image_id,\n layer_iterator,\n tar_stream_getter_iterator,\n reporter=reporter,\n )\n )", "def generate_stack(self, **kwargs: Any) -> Stack:\n definition = CfnginStackDefinitionModel.construct(\n name=self.stack_name, tags=self.args.tags, **kwargs\n )\n stack = Stack(definition, self.context)\n stack._blueprint = self.blueprint # pylint: disable=protected-access\n return stack", "def create_stack():\n\n return Stack()", "def run_stacker(self):\n \tself.iterator()\n \tself.stack()", "def create_stack(self, update=False):\n\n stack = self.current_scenes.stack(\n self.products[self.current_cat][self.current_prod]['bands'], \n self.current_ctx\n )\n\n if update:\n self.set_current_stack(stack)\n\n return stack", "def stack(args):\n p = OptionParser(stack.__doc__)\n p.add_option(\"--top\", default=10, type=\"int\",\n help=\"Draw the first N chromosomes [default: %default]\")\n p.add_option(\"--stacks\",\n default=\"Exons,Introns,DNA_transposons,Retrotransposons\",\n help=\"Features to plot in stackplot [default: %default]\")\n p.add_option(\"--switch\",\n help=\"Change chr names based on two-column file [default: %default]\")\n add_window_options(p)\n opts, args, iopts = p.set_image_options(args, figsize=\"8x8\")\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n fastafile, = args\n top = opts.top\n window, shift, subtract = check_window_options(opts)\n switch = opts.switch\n if switch:\n switch = DictFile(opts.switch)\n\n stacks = opts.stacks.split(\",\")\n bedfiles = get_beds(stacks)\n binfiles = get_binfiles(bedfiles, fastafile, shift, subtract=subtract)\n\n sizes = Sizes(fastafile)\n s = list(sizes.iter_sizes())[:top]\n maxl = max(x[1] for x in s)\n margin = .08\n inner = .02 # y distance between tracks\n\n pf = fastafile.rsplit(\".\", 1)[0]\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n # Gauge\n ratio = draw_gauge(root, margin, maxl)\n\n # Per chromosome\n yinterval = (1 - 2 * margin) / (top + 1)\n xx = margin\n yy = 1 - margin\n for chr, clen in s:\n yy -= yinterval\n xlen = clen / ratio\n cc = chr\n if \"_\" in chr:\n ca, cb = chr.split(\"_\")\n cc = ca[0].upper() + cb\n\n if switch and cc in switch:\n cc = \"\\n\".join((cc, \"({0})\".format(switch[cc])))\n\n root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))\n ax = fig.add_axes([xx, yy, xlen, yinterval - inner])\n\n nbins = clen / shift\n if clen % shift:\n nbins += 1\n\n stackplot(ax, binfiles, nbins, palette, chr, window, shift)\n root.text(xx - .04, yy + .5 * (yinterval - inner), cc, ha=\"center\", va=\"center\")\n\n ax.set_xlim(0, nbins)\n ax.set_ylim(0, 1)\n ax.set_axis_off()\n\n # Legends\n yy -= yinterval\n xx = margin\n for b, p in zip(bedfiles, palette):\n b = b.rsplit(\".\", 1)[0].replace(\"_\", \" \")\n b = Registration.get(b, b)\n\n root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))\n xx += 2 * inner\n root.text(xx, yy, b, size=13)\n xx += len(b) * .012 + inner\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def draw(self):\n\n 
self.state_stack.peek().draw(self.screen)", "def __init__(self,stream=None,tabwidth=4):\n\n if stream is None:\n from sys import stdout\n stream = stdout\n self.stream = stream\n self.tabwidth = tabwidth\n self.stack = [0]", "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['marks']\n stacking_context = StackingContext.from_page(page)\n draw_background(\n stream, stacking_context.box.background, clip_box=False, bleed=bleed,\n marks=marks)\n draw_background(stream, page.canvas_background, clip_box=False)\n draw_border(stream, page)\n draw_stacking_context(stream, stacking_context)", "def make_stack(self):\n self.snapshot = Snapshot()\n self.snapshot.clean(self.ref)\n \n self.values = {}\n self.classes = []\n self.stack = Stack(self, self.snapshot)", "def createPipe(self, transaction):\n pipe = detectPipeClass(transaction.dev, transaction.endpt)(self)\n name = \"Dev %s, %s\" % (transaction.dev, transaction.getTransferString())\n self.appendCanvas(name, pipe.stack)\n return pipe", "def annotate_stacks(self):\n curthread = gdb.selected_thread()\n try:\n for thread in gdb.selected_inferior().threads():\n thread.switch()\n\n # This is different depending on gdb version\n try:\n frame = gdb.newest_frame()\n stackpointer = frame.read_register(\"sp\")\n except:\n regname, as_hex, as_int = gdb.execute(\"info register sp\", False, True).split()\n stackpointer = int(as_hex, 16)\n memrange = self.get_range(stackpointer)\n tid = thread.ptid[1] if thread.ptid[1] else thread.ptid[2]\n if memrange is None:\n print(\"Did not find stack of thread %d\" % tid)\n continue\n memrange.settype(MemoryType.Stack, \"Stack of thread %d(TID %d)\" % (thread.num, tid))\n finally:\n curthread.switch()", "def create_kinesis_stream(stack, name, shard_count):\n\n return stack.stack.add_resource(\n Stream(\n '{0}Stream'.format(name.replace('-', '')),\n ShardCount=shard_count,\n Name='{0}Stream'.format(name)))", "def open_stack(self, width: Optional[int] = None, height: Optional[int] = None):\n self._open_layouting.append(\"Stack\")\n self._client.add_layout(Stack(width=width, height=height))", "def context_stack(self):\n stored = self.binary_context_stack\n if stored:\n return pickle.loads(stored)\n\n # Create a new context stack\n stack = ContextStack(self)\n\n # Add the static command layer as first layer\n stack.add_command_layer(\"static\")\n return stack", "def __init__(self, input_stream, level=9):\n super(Gzip, self).__init__(input_stream)\n\n self._level = level", "def generate_overlayfs_stacking(self, working_file_name):\n\n # Reopenthe working file\n working_file = open(working_file_name, \"a\")\n\n\n working_file.write(\"generate_overlayfs_stacking\\n\")\n\n # We are done here, now close the file\n working_file.close()", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def __repr__(self):\n return 'Stack({} items, top={})'.format(self.length(), self.peek())", "def draw_background(stream, bg, clip_box=True, bleed=None, marks=()):\n if bg is None:\n return\n\n with stacked(stream):\n if clip_box:\n for box in bg.layers[-1].clipped_boxes:\n rounded_box_path(stream, box)\n stream.clip()\n stream.end()\n\n # Background color\n if bg.color.alpha > 0:\n with stacked(stream):\n 
stream.set_color_rgb(*bg.color[:3])\n stream.set_alpha(bg.color.alpha)\n painting_area = bg.layers[-1].painting_area\n if painting_area:\n if bleed:\n # Painting area is the PDF BleedBox\n x, y, width, height = painting_area\n painting_area = (\n x - bleed['left'], y - bleed['top'],\n width + bleed['left'] + bleed['right'],\n height + bleed['top'] + bleed['bottom'])\n stream.rectangle(*painting_area)\n stream.clip()\n stream.end()\n stream.rectangle(*painting_area)\n stream.fill()\n\n if bleed and marks:\n x, y, width, height = bg.layers[-1].painting_area\n x -= bleed['left']\n y -= bleed['top']\n width += bleed['left'] + bleed['right']\n height += bleed['top'] + bleed['bottom']\n half_bleed = {key: value * 0.5 for key, value in bleed.items()}\n svg = f'''\n <svg height=\"{height}\" width=\"{width}\"\n fill=\"transparent\" stroke=\"black\" stroke-width=\"1\"\n xmlns=\"http://www.w3.org/2000/svg\">\n '''\n if 'crop' in marks:\n svg += f'''\n <path d=\"M0,{bleed['top']} h{half_bleed['left']}\" />\n <path d=\"M0,{bleed['top']} h{half_bleed['right']}\"\n transform=\"translate({width},0) scale(-1,1)\" />\n <path d=\"M0,{bleed['bottom']} h{half_bleed['right']}\"\n transform=\"translate({width},{height}) scale(-1,-1)\" />\n <path d=\"M0,{bleed['bottom']} h{half_bleed['left']}\"\n transform=\"translate(0,{height}) scale(1,-1)\" />\n <path d=\"M{bleed['left']},0 v{half_bleed['top']}\" />\n <path d=\"M{bleed['right']},0 v{half_bleed['bottom']}\"\n transform=\"translate({width},{height}) scale(-1,-1)\" />\n <path d=\"M{bleed['left']},0 v{half_bleed['bottom']}\"\n transform=\"translate(0,{height}) scale(1,-1)\" />\n <path d=\"M{bleed['right']},0 v{half_bleed['top']}\"\n transform=\"translate({width},0) scale(-1,1)\" />\n '''\n if 'cross' in marks:\n svg += f'''\n <circle r=\"{half_bleed['top']}\" transform=\"scale(0.5)\n translate({width},{half_bleed['top']}) scale(0.5)\" />\n <path transform=\"scale(0.5) translate({width},0)\" d=\"\n M-{half_bleed['top']},{half_bleed['top']} h{bleed['top']}\n M0,0 v{bleed['top']}\" />\n <circle r=\"{half_bleed['bottom']}\" transform=\"\n translate(0,{height}) scale(0.5)\n translate({width},-{half_bleed['bottom']}) scale(0.5)\" />\n <path d=\"M-{half_bleed['bottom']},-{half_bleed['bottom']}\n h{bleed['bottom']} M0,0 v-{bleed['bottom']}\" transform=\"\n translate(0,{height}) scale(0.5) translate({width},0)\" />\n <circle r=\"{half_bleed['left']}\" transform=\"scale(0.5)\n translate({half_bleed['left']},{height}) scale(0.5)\" />\n <path d=\"M{half_bleed['left']},-{half_bleed['left']}\n v{bleed['left']} M0,0 h{bleed['left']}\"\n transform=\"scale(0.5) translate(0,{height})\" />\n <circle r=\"{half_bleed['right']}\" transform=\"\n translate({width},0) scale(0.5)\n translate(-{half_bleed['right']},{height}) scale(0.5)\" />\n <path d=\"M-{half_bleed['right']},-{half_bleed['right']}\n v{bleed['right']} M0,0 h-{bleed['right']}\" transform=\"\n translate({width},0) scale(0.5) translate(0,{height})\" />\n '''\n svg += '</svg>'\n tree = ElementTree.fromstring(svg)\n image = SVGImage(tree, None, None, stream)\n # Painting area is the PDF media box\n size = (width, height)\n position = (x, y)\n repeat = ('no-repeat', 'no-repeat')\n unbounded = True\n painting_area = position + size\n positioning_area = (0, 0, width, height)\n clipped_boxes = []\n layer = BackgroundLayer(\n image, size, position, repeat, unbounded, painting_area,\n positioning_area, clipped_boxes)\n bg.layers.insert(0, layer)\n # Paint in reversed order: first layer is \"closest\" to the viewer.\n for layer in 
reversed(bg.layers):\n draw_background_image(stream, layer, bg.image_rendering)", "def stack(cls,\n context: interfaces.context.ContextInterface,\n layer_name: str,\n progress_callback: constants.ProgressCallback = None) -> Optional[interfaces.layers.DataLayerInterface]:\n # Bail out by default unless we can stack properly\n layer = context.layers[layer_name]\n join = interfaces.configuration.path_join\n\n # Never stack on top of an intel layer\n # FIXME: Find a way to improve this check\n if isinstance(layer, intel.Intel):\n return None\n\n linux_banners = LinuxBannerCache.load_banners()\n # If we have no banners, don't bother scanning\n if not linux_banners:\n vollog.info(\"No Linux banners found - if this is a linux plugin, please check your symbol files location\")\n return None\n\n mss = scanners.MultiStringScanner([x for x in linux_banners if x is not None])\n for _, banner in layer.scan(context = context, scanner = mss, progress_callback = progress_callback):\n dtb = None\n vollog.debug(\"Identified banner: {}\".format(repr(banner)))\n\n symbol_files = linux_banners.get(banner, None)\n if symbol_files:\n isf_path = symbol_files[0]\n table_name = context.symbol_space.free_table_name('LintelStacker')\n table = linux.LinuxKernelIntermedSymbols(context,\n 'temporary.' + table_name,\n name = table_name,\n isf_url = isf_path)\n context.symbol_space.append(table)\n kaslr_shift, aslr_shift = cls.find_aslr(context,\n table_name,\n layer_name,\n progress_callback = progress_callback)\n\n layer_class = intel.Intel # type: Type\n if 'init_top_pgt' in table.symbols:\n layer_class = intel.Intel32e\n dtb_symbol_name = 'init_top_pgt'\n elif 'init_level4_pgt' in table.symbols:\n layer_class = intel.Intel32e\n dtb_symbol_name = 'init_level4_pgt'\n else:\n dtb_symbol_name = 'swapper_pg_dir'\n\n dtb = cls.virtual_to_physical_address(table.get_symbol(dtb_symbol_name).address + kaslr_shift)\n\n # Build the new layer\n new_layer_name = context.layers.free_layer_name(\"IntelLayer\")\n config_path = join(\"IntelHelper\", new_layer_name)\n context.config[join(config_path, \"memory_layer\")] = layer_name\n context.config[join(config_path, \"page_map_offset\")] = dtb\n context.config[join(config_path, LinuxSymbolFinder.banner_config_key)] = str(banner, 'latin-1')\n\n layer = layer_class(context,\n config_path = config_path,\n name = new_layer_name,\n metadata = {'kaslr_value': aslr_shift})\n\n if layer and dtb:\n vollog.debug(\"DTB was found at: 0x{:0x}\".format(dtb))\n return layer\n vollog.debug(\"No suitable linux banner could be matched\")\n return None", "def create_stream(self):\n pass", "def topStack(self):\r\n\r\n self.z_stack=0\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def _stacking_for_tile(tile, params):\n preread_ifgs = params[C.PREREAD_IFGS]\n vcmt = params[C.VCMT]\n ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]]\n output_dir = params[C.TMPDIR]\n log.debug(f\"Stacking of tile {tile.index}\")\n ifg_parts = [shared.IfgPart(p, tile, preread_ifgs, params) for p in ifg_paths]\n mst_tile = np.load(Configuration.mst_path(params, tile.index))\n rate, error, samples = stack_rate_array(ifg_parts, params, vcmt, mst_tile)\n np.save(file=os.path.join(output_dir, 
'stack_rate_{}.npy'.format(tile.index)), arr=rate)\n np.save(file=os.path.join(output_dir, 'stack_error_{}.npy'.format(tile.index)), arr=error)\n np.save(file=os.path.join(output_dir, 'stack_samples_{}.npy'.format(tile.index)), arr=samples)", "def preview_stream(self):\n pass", "def show(self, context, width=None, color=None):\n if not color:\n color = self.color\n if not width:\n width = self.width\n s = max(context.size)\n u = min(context.units)\n n = s/u\n p = Point(*context.point())\n p = self.projectPoint(p)\n v1 = Vector.createFromPolar(n, self.angle)\n v2 = -v1\n s = Segment(v1(p), v2(p), color=color, width=width, conversion=True)\n s.show(context)" ]
[ "0.617012", "0.5554513", "0.5277841", "0.5274168", "0.5218626", "0.51678014", "0.5144608", "0.5100139", "0.5036429", "0.4926174", "0.4897954", "0.48235258", "0.47874787", "0.47840837", "0.476707", "0.47404984", "0.46945328", "0.46772468", "0.4641723", "0.462906", "0.462906", "0.462906", "0.462906", "0.4585966", "0.45844498", "0.45802367", "0.45760787", "0.4538834", "0.45378765", "0.45368743" ]
0.74125075
0
Draw the background color and image to a ``document.Stream``. If ``clip_box`` is set to ``False``, the background is not clipped to the border box of the background, but only to the painting area.
def draw_background(stream, bg, clip_box=True, bleed=None, marks=()):
    if bg is None:
        return

    with stacked(stream):
        if clip_box:
            for box in bg.layers[-1].clipped_boxes:
                rounded_box_path(stream, box)
            stream.clip()
            stream.end()

        # Background color
        if bg.color.alpha > 0:
            with stacked(stream):
                stream.set_color_rgb(*bg.color[:3])
                stream.set_alpha(bg.color.alpha)
                painting_area = bg.layers[-1].painting_area
                if painting_area:
                    if bleed:
                        # Painting area is the PDF BleedBox
                        x, y, width, height = painting_area
                        painting_area = (
                            x - bleed['left'], y - bleed['top'],
                            width + bleed['left'] + bleed['right'],
                            height + bleed['top'] + bleed['bottom'])
                    stream.rectangle(*painting_area)
                    stream.clip()
                    stream.end()
                    stream.rectangle(*painting_area)
                    stream.fill()

        if bleed and marks:
            x, y, width, height = bg.layers[-1].painting_area
            x -= bleed['left']
            y -= bleed['top']
            width += bleed['left'] + bleed['right']
            height += bleed['top'] + bleed['bottom']
            half_bleed = {key: value * 0.5 for key, value in bleed.items()}
            svg = f'''
              <svg height="{height}" width="{width}"
                   fill="transparent" stroke="black" stroke-width="1"
                   xmlns="http://www.w3.org/2000/svg">
            '''
            if 'crop' in marks:
                svg += f'''
                  <path d="M0,{bleed['top']} h{half_bleed['left']}" />
                  <path d="M0,{bleed['top']} h{half_bleed['right']}"
                        transform="translate({width},0) scale(-1,1)" />
                  <path d="M0,{bleed['bottom']} h{half_bleed['right']}"
                        transform="translate({width},{height}) scale(-1,-1)" />
                  <path d="M0,{bleed['bottom']} h{half_bleed['left']}"
                        transform="translate(0,{height}) scale(1,-1)" />
                  <path d="M{bleed['left']},0 v{half_bleed['top']}" />
                  <path d="M{bleed['right']},0 v{half_bleed['bottom']}"
                        transform="translate({width},{height}) scale(-1,-1)" />
                  <path d="M{bleed['left']},0 v{half_bleed['bottom']}"
                        transform="translate(0,{height}) scale(1,-1)" />
                  <path d="M{bleed['right']},0 v{half_bleed['top']}"
                        transform="translate({width},0) scale(-1,1)" />
                '''
            if 'cross' in marks:
                svg += f'''
                  <circle r="{half_bleed['top']}" transform="scale(0.5)
                          translate({width},{half_bleed['top']}) scale(0.5)" />
                  <path transform="scale(0.5) translate({width},0)" d="
                        M-{half_bleed['top']},{half_bleed['top']} h{bleed['top']}
                        M0,0 v{bleed['top']}" />
                  <circle r="{half_bleed['bottom']}" transform="
                          translate(0,{height}) scale(0.5)
                          translate({width},-{half_bleed['bottom']}) scale(0.5)" />
                  <path d="M-{half_bleed['bottom']},-{half_bleed['bottom']}
                        h{bleed['bottom']} M0,0 v-{bleed['bottom']}" transform="
                        translate(0,{height}) scale(0.5) translate({width},0)" />
                  <circle r="{half_bleed['left']}" transform="scale(0.5)
                          translate({half_bleed['left']},{height}) scale(0.5)" />
                  <path d="M{half_bleed['left']},-{half_bleed['left']}
                        v{bleed['left']} M0,0 h{bleed['left']}"
                        transform="scale(0.5) translate(0,{height})" />
                  <circle r="{half_bleed['right']}" transform="
                          translate({width},0) scale(0.5)
                          translate(-{half_bleed['right']},{height}) scale(0.5)" />
                  <path d="M-{half_bleed['right']},-{half_bleed['right']}
                        v{bleed['right']} M0,0 h-{bleed['right']}" transform="
                        translate({width},0) scale(0.5) translate(0,{height})" />
                '''
            svg += '</svg>'
            tree = ElementTree.fromstring(svg)
            image = SVGImage(tree, None, None, stream)
            # Painting area is the PDF media box
            size = (width, height)
            position = (x, y)
            repeat = ('no-repeat', 'no-repeat')
            unbounded = True
            painting_area = position + size
            positioning_area = (0, 0, width, height)
            clipped_boxes = []
            layer = BackgroundLayer(
                image, size, position, repeat, unbounded, painting_area,
                positioning_area, clipped_boxes)
            bg.layers.insert(0, layer)
        # Paint in reversed order: first layer is "closest" to the viewer.
        for layer in reversed(bg.layers):
            draw_background_image(stream, layer, bg.image_rendering)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['marks']\n stacking_context = StackingContext.from_page(page)\n draw_background(\n stream, stacking_context.box.background, clip_box=False, bleed=bleed,\n marks=marks)\n draw_background(stream, page.canvas_background, clip_box=False)\n draw_border(stream, page)\n draw_stacking_context(stream, stacking_context)", "def draw(self, background=COLOUR_BLACK, show=False, save=False,\n generation=None):\n size = self.img_size\n img = Image.new('RGB', size, background)\n draw = Image.new('RGBA', size)\n pdraw = ImageDraw.Draw(draw)\n for polygon in self.polygons:\n colour = polygon.colour\n points = polygon.points\n pdraw.polygon(points, fill=colour, outline=colour)\n img.paste(draw, mask=draw)\n\n if show:\n img.show()\n\n if save:\n # TODO use self.generation\n temp_dir = tempfile.gettempdir()\n temp_name = u\"0000000000{}\".format(generation)[-10:]\n out_path = u\"{}/{}.png\".format(temp_dir, temp_name)\n img = img.filter(ImageFilter.GaussianBlur(radius=3))\n img.save(out_path)\n print u\"saving image to {}\".format(out_path)\n\n return img", "def draw_stacking_context(stream, stacking_context):\n # See https://www.w3.org/TR/CSS2/zindex.html\n with stacked(stream):\n box = stacking_context.box\n\n stream.begin_marked_content(box, mcid=True)\n\n # apply the viewport_overflow to the html box, see #35\n if box.is_for_root_element and (\n stacking_context.page.style['overflow'] != 'visible'):\n rounded_box_path(\n stream, stacking_context.page.rounded_padding_box())\n stream.clip()\n stream.end()\n\n if box.is_absolutely_positioned() and box.style['clip']:\n top, right, bottom, left = box.style['clip']\n if top == 'auto':\n top = 0\n if right == 'auto':\n right = 0\n if bottom == 'auto':\n bottom = box.border_height()\n if left == 'auto':\n left = box.border_width()\n stream.rectangle(\n box.border_box_x() + right, box.border_box_y() + top,\n left - right, bottom - top)\n stream.clip()\n stream.end()\n\n if box.style['opacity'] < 1:\n original_stream = stream\n stream = stream.add_group(*stream.page_rectangle)\n\n if box.transformation_matrix:\n if box.transformation_matrix.determinant:\n stream.transform(*box.transformation_matrix.values)\n else:\n stream.end_marked_content()\n return\n\n # Point 1 is done in draw_page\n\n # Point 2\n if isinstance(box, (boxes.BlockBox, boxes.MarginBox,\n boxes.InlineBlockBox, boxes.TableCellBox,\n boxes.FlexContainerBox)):\n # The canvas background was removed by layout_backgrounds\n draw_box_background_and_border(stream, stacking_context.page, box)\n\n with stacked(stream):\n # dont clip the PageBox, see #35\n if box.style['overflow'] != 'visible' and not isinstance(\n box, boxes.PageBox):\n # Only clip the content and the children:\n # - the background is already clipped\n # - the border must *not* be clipped\n rounded_box_path(stream, box.rounded_padding_box())\n stream.clip()\n stream.end()\n\n # Point 3\n for child_context in stacking_context.negative_z_contexts:\n draw_stacking_context(stream, child_context)\n\n # Point 4\n for block in stacking_context.block_level_boxes:\n draw_box_background_and_border(\n stream, stacking_context.page, block)\n\n # Point 5\n for child_context in stacking_context.float_contexts:\n draw_stacking_context(stream, child_context)\n\n # Point 6\n if isinstance(box, boxes.InlineBox):\n draw_inline_level(stream, stacking_context.page, box)\n\n # Point 7\n for block in [box] + 
stacking_context.blocks_and_cells:\n if isinstance(block, boxes.ReplacedBox):\n draw_border(stream, block)\n draw_replacedbox(stream, block)\n elif block.children:\n if block != box:\n stream.begin_marked_content(block, mcid=True)\n if isinstance(block.children[-1], boxes.LineBox):\n for child in block.children:\n draw_inline_level(\n stream, stacking_context.page, child)\n if block != box:\n stream.end_marked_content()\n\n # Point 8\n for child_context in stacking_context.zero_z_contexts:\n draw_stacking_context(stream, child_context)\n\n # Point 9\n for child_context in stacking_context.positive_z_contexts:\n draw_stacking_context(stream, child_context)\n\n # Point 10\n draw_outlines(stream, box)\n\n if box.style['opacity'] < 1:\n group_id = stream.id\n stream = original_stream\n stream.push_state()\n stream.set_alpha(box.style['opacity'], stroke=True, fill=True)\n stream.draw_x_object(group_id)\n stream.pop_state()\n\n stream.end_marked_content()", "def display_in_window(self, clipping=False):\n for depth_image, color_image in self:\n grey_color = 153\n depth_image_3d = np.dstack((depth_image, depth_image, depth_image))\n\n # Render images\n depth_colormap = cv2.applyColorMap(\n cv2.convertScaleAbs(depth_image, alpha=0.03),\n cv2.COLORMAP_JET)\n\n if clipping:\n bg_removed = np.where((depth_image_3d <= 0), grey_color, color_image)\n images = np.hstack((bg_removed, depth_colormap))\n else:\n images = np.hstack((color_image, depth_colormap))\n\n cv2.namedWindow('Depth/Color Stream', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Depth/Color Stream', images)\n\n key = cv2.waitKey(1)\n\n if key == ord('c'):\n save_camera_frame(depth_image, color_image)\n\n # Press esc or 'q' to close the image window\n if key == ord('q') or key == 27 or cv2.getWindowProperty('Depth/Color Stream', 0) == -1:\n cv2.destroyAllWindows()\n break\n\n self.pipeline.stop()", "def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self", "def DrawBackground(self, dc, wnd, rect):\r\n \r\n # draw background\r\n dc.SetBrush(self._bkbrush)\r\n dc.SetPen(wx.TRANSPARENT_PEN)\r\n dc.DrawRectangle(-1, -1, rect.GetWidth()+2, rect.GetHeight()+2)\r\n\r\n # draw base line\r\n dc.SetPen(wx.GREY_PEN)\r\n dc.DrawLine(0, rect.GetHeight()-1, rect.GetWidth(), rect.GetHeight()-1)", "def DrawPlainBackground(self, dc, wnd, _rect):\r\n \r\n rect = wx.Rect(*_rect)\r\n rect.height += 1\r\n\r\n dc.SetBrush(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)))\r\n dc.DrawRectangle(rect.x - 1, rect.y - 1, rect.width + 2, rect.height + 1)", "def draw(self, screen):\n pg.draw.rect(screen, self.bg_color, self.rect)\n\n for y, surf in enumerate(self.images):\n # Don't blit below the rect area.\n if y * self.font_height + self.font_height > self.rect.h:\n break\n screen.blit(surf, (self.rect.x, self.rect.y+y*self.font_height))", "def draw_border(stream, box):\n # We need a plan to draw beautiful borders, and that's difficult, no need\n # to lie. 
Let's try to find the cases that we can handle in a smart way.\n\n def get_columns_with_rule():\n \"\"\"Yield columns that have a rule drawn on the left.\"\"\"\n skip_next = True\n for child in box.children:\n if child.style['column_span'] == 'all':\n skip_next = True\n elif skip_next:\n skip_next = False\n else:\n yield child\n\n def draw_column_border():\n \"\"\"Draw column borders.\"\"\"\n columns = (\n isinstance(box, boxes.BlockContainerBox) and (\n box.style['column_width'] != 'auto' or\n box.style['column_count'] != 'auto'))\n if columns and box.style['column_rule_width']:\n border_widths = (0, 0, 0, box.style['column_rule_width'])\n for child in get_columns_with_rule():\n with stacked(stream):\n position_x = (child.position_x - (\n box.style['column_rule_width'] +\n box.style['column_gap']) / 2)\n border_box = (\n position_x, child.position_y,\n box.style['column_rule_width'], child.height)\n clip_border_segment(\n stream, box.style['column_rule_style'],\n box.style['column_rule_width'], 'left', border_box,\n border_widths)\n draw_rect_border(\n stream, border_box, border_widths,\n box.style['column_rule_style'], styled_color(\n box.style['column_rule_style'],\n get_color(box.style, 'column_rule_color'), 'left'))\n\n # The box is hidden, easy.\n if box.style['visibility'] != 'visible':\n draw_column_border()\n return\n\n widths = [getattr(box, f'border_{side}_width') for side in SIDES]\n\n # No border, return early.\n if all(width == 0 for width in widths):\n draw_column_border()\n return\n\n colors = [get_color(box.style, f'border_{side}_color') for side in SIDES]\n styles = [\n colors[i].alpha and box.style[f'border_{side}_style']\n for (i, side) in enumerate(SIDES)]\n\n # The 4 sides are solid or double, and they have the same color. Oh yeah!\n # We can draw them so easily!\n if set(styles) in (set(('solid',)), set(('double',))) and (\n len(set(colors)) == 1):\n draw_rounded_border(stream, box, styles[0], colors[0])\n draw_column_border()\n return\n\n # We're not smart enough to find a good way to draw the borders :/. 
We must\n # draw them side by side.\n for side, width, color, style in zip(SIDES, widths, colors, styles):\n if width == 0 or not color:\n continue\n with stacked(stream):\n clip_border_segment(\n stream, style, width, side, box.rounded_border_box()[:4],\n widths, box.rounded_border_box()[4:])\n draw_rounded_border(\n stream, box, style, styled_color(style, color, side))\n\n draw_column_border()", "def draw_background(self, verbosity=0):\n log.debug(\"Drawing background\")\n\n log.debug(\"Drawing 'rock' background\")\n pygame.draw.rect(self.surface, (127, 127, 127), self.surface.get_rect())\n\n log.debug(\"Drawing Region contents\")\n for region in self.dungeon_map.regions:\n coords = self.map_to_screen(region.coords)\n if verbosity > 0:\n # Color-code regions for convenience\n if region.kind == Region.ROOM:\n color = (255, 255, 240)\n elif region.kind == Region.CHAMBER:\n color = (255, 240, 255)\n elif region.kind == Region.PASSAGE:\n color = (240, 255, 255)\n else:\n raise LookupError(\"Unknown Region kind '{0}'\"\n .format(region.kind))\n else:\n color = (255, 255, 255)\n pygame.draw.polygon(self.surface, color, coords)\n\n if verbosity == 0:\n return\n log.debug(\"Drawing Connection contents\")\n for conn in self.dungeon_map.connections:\n coords = self.map_to_screen(conn.get_poly_coords())\n if (conn.kind == Connection.DOOR or\n conn.kind == Connection.SECRET or\n conn.kind == Connection.ONEWAY):\n if conn.is_incomplete():\n color = (127, 140, 127)\n else:\n color = (240, 255, 240)\n elif conn.kind == Connection.ARCH:\n if conn.is_incomplete():\n color = (127, 127, 140)\n else:\n color = (240, 240, 255)\n elif conn.kind == Connection.OPEN:\n if conn.is_incomplete():\n color = (140, 127, 127)\n else:\n color = (255, 240, 240)\n else:\n continue\n pygame.draw.polygon(self.surface, color, coords)", "def draw(self, background):\n background.blit(self.image, (self.x_pos, self.y_pos))", "def cut_into_clips(movie_info, \n peak_thresh, \n clip_window, \n clip_window_origin, \n output_file, \n draw_box=True,\n include_start_time=False):\n movie_file = movie_info['movie_file']\n peak_times = movie_info['peak_times']\n peak_vals = movie_info['peak_vals']\n start_time = movie_info['start_time']\n interaction_start_times = movie_info['interaction_start']\n interaction_end_times = movie_info['interaction_end']\n\n print \"clip_window\", clip_window\n if clip_window[0] == 0 and clip_window[1] == 0:\n clip_all_interactions = True\n else:\n clip_all_interactions = False\n\n if clip_window_origin == 'interaction_start':\n if interaction_start_times is not None:\n start_clip_times = interaction_start_times\n elif clip_window_origin == 'peak':\n start_clip_times = peak_times\n\n clip_list_arr = []\n ## Now cut clips (do this the lazy way for now,\n ## saving to individual files, figure out pipes\n ## after you get this working). 
\n duration = 0\n print \"start_clip_times\", start_clip_times\n for i in range(len(start_clip_times)):\n ## If you wanted to make sure that video clips do not overlap, uncomment the below if statement:\n #if i == 0 or (start_clip_times[i] - start_clip_times[i-1] > float(duration)):\n if include_start_time:\n t = start_clip_times[i] + start_time - clip_window[0]\n else:\n t = start_clip_times[i] - clip_window[0]\n v = peak_vals[i]\n #start = get_time_string(t)\n start = str(t)\n if clip_all_interactions and interaction_end_times is not None:\n duration = str(interaction_end_times[i] - start_clip_times[i])\n else:\n duration = str(clip_window[0] + clip_window[1])\n print 'start', start, 'duration', duration\n\n #new_file = output_file+'_'+str(int(t*100))+'_clip.mp4'\n new_file = output_file+'_'+str(int(i))+'_clip.mp4'\n if v<peak_thresh:\n print \"PEAK LESS THAN THRESHOLD: \", v, \"thresh: \", peak_thresh\n if v>=peak_thresh or clip_all_interactions:\n if len(clip_list_arr) == 0 or clip_list_arr[-1] != new_file:\n print \"Clipping: \" + str(i)\n clip_list_arr.append(new_file)\n\n ## THIS IS REPLACED BY shlex.split\n # cmd = ['ffmpeg']\n # cmd += ['-i', movie_file+'.mp4']\n # cmd += ['-ss', start]\n # cmd += ['-t', duration]\n # if draw_box:\n # cmd += ['-vf']\n # cmd += ['drawbox=0:0:100:100:red'+':t=1']\n # cmd += ['-vf']\n # #cmd += ['drawbox=0:0:100:100:red:t='+str(min(100, int(v*1000)/5.0))]\n # cmd += ['drawbox=640:0:100:100:red:t='+str(min(100, int(v*1000)/5.0))]\n\n # cmd += [' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: text='+str(i)+': [email protected]: x=610: y=460\\\"' ]\n # cmd += ['-y']\n # cmd += [new_file]\n # #cmd += ['> /dev/null 2>&1 < /dev/null'] #not 100% sure what this does\n # # it is supposed to not send\n # # output to the PIPE buffer\n\n\n cmd = 'ffmpeg -i '+str(movie_file)+'.mp4 -ss '+str(start)+' -t '+str(duration)\n if draw_box:\n cmd = cmd+' -vf drawbox=0:0:100:100:red:t=1 -vf drawbox=640:0:100:100:red:t='+str(min(100, int(v*1000)/5.0))+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: text='+str(i)+': [email protected]: x=610: y=460\\\"' \n cmd = cmd + ' -y '+new_file\n\n \n args = shlex.split(cmd)\n\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n print '-->Running: ', cmd\n p = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n else:\n print \"Error, did not clip: \" + str(i)\n print \"peak_thresh: \" + str(peak_thresh)\n print \"start_clip_times[i] - start_clip_times[i-1]\" + str(start_clip_times[i] - start_clip_times[i-1])\n\n return clip_list_arr", "def draw_background(stdscr, title=\"2048 Rogue-Agile Edition\", color_scheme=2):\n console_height, console_width = stdscr.getmaxyx()\n brd.color_border(stdscr, (0, 0), (console_width, console_height), color_scheme)\n brd.color_hash(stdscr, (0, 0), (console_width, console_height), color_scheme)\n brd.add_title(stdscr, title, underline=False, color_scheme=color_scheme)", "def Clip(*args, **kwargs):\n return _gdi_.GraphicsContext_Clip(*args, **kwargs)", "def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = 
create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)", "def DrawBackground(self, dc):\r\n\r\n rect = self.GetClientRect()\r\n\r\n dc.SetPen(wx.TRANSPARENT_PEN)\r\n dc.SetBrush(wx.Brush(colourTargetBackground))\r\n dc.DrawRectangleRect(rect)\r\n\r\n dc.SetPen(wx.Pen(colourTargetBorder))\r\n\r\n left = rect.GetLeft()\r\n top = rect.GetTop()\r\n right = rect.GetRight()\r\n bottom = rect.GetBottom()\r\n\r\n if self._direction != wx.CENTER:\r\n \r\n if not self._center or self._direction != wx.BOTTOM:\r\n dc.DrawLine(left, top, right+1, top)\r\n if not self._center or self._direction != wx.RIGHT:\r\n dc.DrawLine(left, top, left, bottom+1)\r\n if not self._center or self._direction != wx.LEFT:\r\n dc.DrawLine(right, top, right, bottom+1)\r\n if not self._center or self._direction != wx.TOP:\r\n dc.DrawLine(left, bottom, right+1, bottom)\r\n\r\n dc.SetPen(wx.Pen(colourTargetShade))\r\n\r\n if self._direction != wx.RIGHT:\r\n dc.DrawLine(left + 1, top + 1, left + 1, bottom)\r\n if self._direction != wx.BOTTOM:\r\n dc.DrawLine(left + 1, top + 1, right, top + 1)", "def display_audio_clip(audio_clip, sample_freq, title_label):\n t = arange(audio_clip.shape[0], dtype=float32) / sample_freq\n \n f = figure(1, figsize=(8.54, 4.8))\n clf()\n subplot(211)\n plot(t, audio_clip)\n xlabel(\"time (s)\")\n ylabel('amplitude (a. u.)')\n title(title_label)\n grid(True)\n subplot(212)\n specgram(audio_clip, Fs=sample_freq)\n xlim(0, 1.1)\n ylim(0, 10000)\n xlabel('time (s)')\n ylabel('frequency (Hz)')\n tight_layout()\n savefig('tmp.png', dpi=25)", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', slider_width='4cm'):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n self.channels_wid.single_slider.width = slider_width\n self.channels_wid.multiple_slider.width = slider_width\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.rgb_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.masked_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.sum_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_block_size_text, font_family, font_size,\n font_style, font_weight)\n format_font(self.glyph_use_negative_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.no_options_latex, font_family, font_size, font_style,\n font_weight)\n self.channels_wid.style(\n box_style=box_style, border_visible=False, text_box_style=None,\n text_box_background_colour=None, text_box_width=None,\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)", "def drawBackground(self,screen):\n pygame.draw.rect(screen,(240,240,240),(self.basepos[0],self.basepos[1],204,504))\n pygame.draw.rect(screen,(0,0,0),(self.basepos[0]+2,self.basepos[1]+2,200,500))", "def display_background(self, imagepath):\n background_image = Image.open(imagepath)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def display_background(self, imagepath):\n background_image = Image.open(imagepath)\n self.canvas.image = 
ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def display_background(self, image_path):\n # draws and paints the background with image of given path\n background_image = Image.open(image_path)\n self.canvas.image = ImageTk.PhotoImage(background_image)\n self.canvas.create_image((0, 0), image=self.canvas.image, anchor='nw')", "def _add_background(axis, /, *, debug = False):\n\n # Import special modules ...\n try:\n import matplotlib\n matplotlib.rcParams.update(\n {\n \"backend\" : \"Agg\", # NOTE: See https://matplotlib.org/stable/gallery/user_interfaces/canvasagg.html\n \"figure.dpi\" : 300,\n \"figure.figsize\" : (9.6, 7.2), # NOTE: See https://github.com/Guymer/misc/blob/main/README.md#matplotlib-figure-sizes\n \"font.size\" : 8,\n }\n )\n except:\n raise Exception(\"\\\"matplotlib\\\" is not installed; run \\\"pip install --user matplotlib\\\"\") from None\n\n # **************************************************************************\n\n # Create suitable colour ...\n facecolor = matplotlib.colors.to_rgba(matplotlib.colors.CSS4_COLORS[\"lightblue\"])\n if debug:\n print(f\"INFO: \\\"background\\\" is ({facecolor[0]:.6f},{facecolor[1]:.6f},{facecolor[2]:.6f},{facecolor[3]:.6f}).\")\n\n # Set background ...\n axis.set_facecolor(facecolor)", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def DrawBackground(self, dc, wnd, rect):\r\n\r\n self._buttonRect = wx.Rect()\r\n\r\n # draw background\r\n agwFlags = self.GetAGWFlags()\r\n if agwFlags & AUI_NB_BOTTOM:\r\n r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height)\r\n\r\n # TODO: else if (agwFlags & AUI_NB_LEFT) \r\n # TODO: else if (agwFlags & AUI_NB_RIGHT) \r\n else: #for AUI_NB_TOP\r\n r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height-3)\r\n\r\n dc.GradientFillLinear(r, self._background_top_colour, self._background_bottom_colour, wx.SOUTH)\r\n\r\n # draw base lines\r\n\r\n dc.SetPen(self._border_pen)\r\n y = rect.GetHeight()\r\n w = rect.GetWidth()\r\n\r\n if agwFlags & AUI_NB_BOTTOM:\r\n dc.SetBrush(wx.Brush(self._background_bottom_colour))\r\n dc.DrawRectangle(-1, 0, w+2, 4)\r\n\r\n # TODO: else if (agwFlags & AUI_NB_LEFT) \r\n # TODO: else if (agwFlags & AUI_NB_RIGHT)\r\n \r\n else: # for AUI_NB_TOP\r\n dc.SetBrush(self._base_colour_brush)\r\n dc.DrawRectangle(-1, y-4, w+2, 4)", "def clip(self, window):\n\n def connect_points(clipped, side1, side2, window):\n \"\"\" Connects points of the window. 
\"\"\"\n edge = side1\n while edge != side2:\n clipped.append(window.points[0][edge])\n edge = (edge - 1) % 4\n\n boundaries = window.real_boundaries\n clipped = []\n for face in self._points:\n new_face = []\n entered, exited = None, None\n for i in range(len(face) - 1):\n points, side = Object._clip_line(\n face[i], face[i + 1], *boundaries[0], *boundaries[1])\n\n if not points: # clipped line is outside window\n continue\n\n if side[0] is not None: # entered\n if exited is not None:\n connect_points(new_face, exited, side[0], window)\n else:\n entered = side[0]\n\n if side[1] is not None: # exited\n exited = side[1]\n new_face.append(points[0])\n new_face.append(points[1])\n else:\n new_face.append(points[0])\n\n if new_face and face[0] == face[-1]:\n if entered is not None:\n connect_points(new_face, exited, entered, window)\n new_face.append(new_face[0])\n\n clipped.append(new_face)\n\n self._points = clipped", "def draw(self, draw_surface):\n draw_surface.blit(self.background_frame, (0, 120))", "def draw(self, mask, color=(255, 0, 0), line_width=50, average=False):\n h, w = mask.shape[:2]\n\n plot_y = np.linspace(0, h - 1, h)\n coeffs = self.average_fit if average else self.last_fit_pixel\n\n line_center = coeffs[0] * plot_y ** 2 + coeffs[1] * plot_y + coeffs[2]\n line_left_side = line_center - line_width // 2\n line_right_side = line_center + line_width // 2\n\n pts_left = np.array(list(zip(line_left_side, plot_y)))\n pts_right = np.array(np.flipud(list(zip(line_right_side, plot_y))))\n pts = np.vstack([pts_left, pts_right])\n\n return cv2.fillPoly(mask, [np.int32(pts)], color)", "def blit_background(self):\n self.screen.fill([67, 67, 67])\n self.screen.blit(self.background, (0,0))\n pygame.draw.rect(self.screen, (0, 0, 0), self.seperate_line)", "def __drawAndErase(self, boxToDraw, boxToErase=None):\n dc = wx.ClientDC(self.drawingSurface)\n dc.BeginDrawing()\n dc.SetPen(wx.Pen(wx.WHITE, 1, wx.DOT))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetLogicalFunction(wx.XOR)\n if boxToErase:\n r = wx.Rect(*boxToErase)\n dc.DrawRectangleRect(r)\n\n r = wx.Rect(*boxToDraw)\n dc.DrawRectangleRect(r)\n dc.EndDrawing()" ]
[ "0.50916755", "0.50457513", "0.47716227", "0.46886986", "0.45910823", "0.45242292", "0.45099384", "0.4491094", "0.44542804", "0.4447981", "0.44441408", "0.44344813", "0.43993294", "0.4392615", "0.43871245", "0.43814042", "0.43637508", "0.43514937", "0.4329039", "0.43201867", "0.43201867", "0.43094394", "0.43005523", "0.42835578", "0.4273968", "0.42699313", "0.4260491", "0.42575374", "0.42547014", "0.4247992" ]
0.6876859
0
Draw the box border to a ``document.Stream``.
def draw_border(stream, box):
    # We need a plan to draw beautiful borders, and that's difficult, no need
    # to lie. Let's try to find the cases that we can handle in a smart way.

    def get_columns_with_rule():
        """Yield columns that have a rule drawn on the left."""
        skip_next = True
        for child in box.children:
            if child.style['column_span'] == 'all':
                skip_next = True
            elif skip_next:
                skip_next = False
            else:
                yield child

    def draw_column_border():
        """Draw column borders."""
        columns = (
            isinstance(box, boxes.BlockContainerBox) and (
                box.style['column_width'] != 'auto' or
                box.style['column_count'] != 'auto'))
        if columns and box.style['column_rule_width']:
            border_widths = (0, 0, 0, box.style['column_rule_width'])
            for child in get_columns_with_rule():
                with stacked(stream):
                    position_x = (child.position_x - (
                        box.style['column_rule_width'] +
                        box.style['column_gap']) / 2)
                    border_box = (
                        position_x, child.position_y,
                        box.style['column_rule_width'], child.height)
                    clip_border_segment(
                        stream, box.style['column_rule_style'],
                        box.style['column_rule_width'], 'left', border_box,
                        border_widths)
                    draw_rect_border(
                        stream, border_box, border_widths,
                        box.style['column_rule_style'], styled_color(
                            box.style['column_rule_style'],
                            get_color(box.style, 'column_rule_color'), 'left'))

    # The box is hidden, easy.
    if box.style['visibility'] != 'visible':
        draw_column_border()
        return

    widths = [getattr(box, f'border_{side}_width') for side in SIDES]

    # No border, return early.
    if all(width == 0 for width in widths):
        draw_column_border()
        return

    colors = [get_color(box.style, f'border_{side}_color') for side in SIDES]
    styles = [
        colors[i].alpha and box.style[f'border_{side}_style']
        for (i, side) in enumerate(SIDES)]

    # The 4 sides are solid or double, and they have the same color. Oh yeah!
    # We can draw them so easily!
    if set(styles) in (set(('solid',)), set(('double',))) and (
            len(set(colors)) == 1):
        draw_rounded_border(stream, box, styles[0], colors[0])
        draw_column_border()
        return

    # We're not smart enough to find a good way to draw the borders :/. We must
    # draw them side by side.
    for side, width, color, style in zip(SIDES, widths, colors, styles):
        if width == 0 or not color:
            continue
        with stacked(stream):
            clip_border_segment(
                stream, style, width, side, box.rounded_border_box()[:4],
                widths, box.rounded_border_box()[4:])
            draw_rounded_border(
                stream, box, style, styled_color(style, color, side))

    draw_column_border()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['marks']\n stacking_context = StackingContext.from_page(page)\n draw_background(\n stream, stacking_context.box.background, clip_box=False, bleed=bleed,\n marks=marks)\n draw_background(stream, page.canvas_background, clip_box=False)\n draw_border(stream, page)\n draw_stacking_context(stream, stacking_context)", "def update_border(self):\n # side borders\n for row_position in range(NUM_ROWS+BORDER_WIDTH*2):\n self.stdscr.addstr(row_position, 0, '|', curses.color_pair(7))\n self.stdscr.addstr(row_position, NUM_COLUMNS*BLOCK_WIDTH+1, '|', curses.color_pair(7))\n # top and bottom borders\n for column_position in range(NUM_COLUMNS*BLOCK_WIDTH+BORDER_WIDTH*2):\n self.stdscr.addstr(0, column_position, '-', curses.color_pair(7))\n self.stdscr.addstr(NUM_ROWS+1, column_position, '-', curses.color_pair(7))", "def draw_border():\n \n length = len(BORDER_COORDS)\n \n # Constants for sine wave\n b = 2 * math.pi / length\n speed = 2\n \n # Draw sinusoid red/green design\n for i in range(length):\n # Sine function\n t = perf_counter()\n sine = math.sin(b*i + speed*t) # Wave with period 28\n \n # Map sine value from [-1, 1] to [0, 4)\n red = min(math.floor(2 * sine + 2), 3)\n \n # Fade red and green colors\n lp.led_ctrl_xy(*BORDER_COORDS[i], red, 3 - red)", "def draw_background(stream, bg, clip_box=True, bleed=None, marks=()):\n if bg is None:\n return\n\n with stacked(stream):\n if clip_box:\n for box in bg.layers[-1].clipped_boxes:\n rounded_box_path(stream, box)\n stream.clip()\n stream.end()\n\n # Background color\n if bg.color.alpha > 0:\n with stacked(stream):\n stream.set_color_rgb(*bg.color[:3])\n stream.set_alpha(bg.color.alpha)\n painting_area = bg.layers[-1].painting_area\n if painting_area:\n if bleed:\n # Painting area is the PDF BleedBox\n x, y, width, height = painting_area\n painting_area = (\n x - bleed['left'], y - bleed['top'],\n width + bleed['left'] + bleed['right'],\n height + bleed['top'] + bleed['bottom'])\n stream.rectangle(*painting_area)\n stream.clip()\n stream.end()\n stream.rectangle(*painting_area)\n stream.fill()\n\n if bleed and marks:\n x, y, width, height = bg.layers[-1].painting_area\n x -= bleed['left']\n y -= bleed['top']\n width += bleed['left'] + bleed['right']\n height += bleed['top'] + bleed['bottom']\n half_bleed = {key: value * 0.5 for key, value in bleed.items()}\n svg = f'''\n <svg height=\"{height}\" width=\"{width}\"\n fill=\"transparent\" stroke=\"black\" stroke-width=\"1\"\n xmlns=\"http://www.w3.org/2000/svg\">\n '''\n if 'crop' in marks:\n svg += f'''\n <path d=\"M0,{bleed['top']} h{half_bleed['left']}\" />\n <path d=\"M0,{bleed['top']} h{half_bleed['right']}\"\n transform=\"translate({width},0) scale(-1,1)\" />\n <path d=\"M0,{bleed['bottom']} h{half_bleed['right']}\"\n transform=\"translate({width},{height}) scale(-1,-1)\" />\n <path d=\"M0,{bleed['bottom']} h{half_bleed['left']}\"\n transform=\"translate(0,{height}) scale(1,-1)\" />\n <path d=\"M{bleed['left']},0 v{half_bleed['top']}\" />\n <path d=\"M{bleed['right']},0 v{half_bleed['bottom']}\"\n transform=\"translate({width},{height}) scale(-1,-1)\" />\n <path d=\"M{bleed['left']},0 v{half_bleed['bottom']}\"\n transform=\"translate(0,{height}) scale(1,-1)\" />\n <path d=\"M{bleed['right']},0 v{half_bleed['top']}\"\n transform=\"translate({width},0) scale(-1,1)\" />\n '''\n if 'cross' in marks:\n svg += f'''\n <circle r=\"{half_bleed['top']}\" 
transform=\"scale(0.5)\n translate({width},{half_bleed['top']}) scale(0.5)\" />\n <path transform=\"scale(0.5) translate({width},0)\" d=\"\n M-{half_bleed['top']},{half_bleed['top']} h{bleed['top']}\n M0,0 v{bleed['top']}\" />\n <circle r=\"{half_bleed['bottom']}\" transform=\"\n translate(0,{height}) scale(0.5)\n translate({width},-{half_bleed['bottom']}) scale(0.5)\" />\n <path d=\"M-{half_bleed['bottom']},-{half_bleed['bottom']}\n h{bleed['bottom']} M0,0 v-{bleed['bottom']}\" transform=\"\n translate(0,{height}) scale(0.5) translate({width},0)\" />\n <circle r=\"{half_bleed['left']}\" transform=\"scale(0.5)\n translate({half_bleed['left']},{height}) scale(0.5)\" />\n <path d=\"M{half_bleed['left']},-{half_bleed['left']}\n v{bleed['left']} M0,0 h{bleed['left']}\"\n transform=\"scale(0.5) translate(0,{height})\" />\n <circle r=\"{half_bleed['right']}\" transform=\"\n translate({width},0) scale(0.5)\n translate(-{half_bleed['right']},{height}) scale(0.5)\" />\n <path d=\"M-{half_bleed['right']},-{half_bleed['right']}\n v{bleed['right']} M0,0 h-{bleed['right']}\" transform=\"\n translate({width},0) scale(0.5) translate(0,{height})\" />\n '''\n svg += '</svg>'\n tree = ElementTree.fromstring(svg)\n image = SVGImage(tree, None, None, stream)\n # Painting area is the PDF media box\n size = (width, height)\n position = (x, y)\n repeat = ('no-repeat', 'no-repeat')\n unbounded = True\n painting_area = position + size\n positioning_area = (0, 0, width, height)\n clipped_boxes = []\n layer = BackgroundLayer(\n image, size, position, repeat, unbounded, painting_area,\n positioning_area, clipped_boxes)\n bg.layers.insert(0, layer)\n # Paint in reversed order: first layer is \"closest\" to the viewer.\n for layer in reversed(bg.layers):\n draw_background_image(stream, layer, bg.image_rendering)", "def format_box(box, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin):\n box.box_style = box_style\n box.padding = padding\n box.margin = margin\n if border_visible:\n box.border_color = border_color\n box.border_style = border_style\n box.border_width = border_width\n box.border_radius = border_radius\n else:\n box.border_width = 0", "def draw(self):\n print(\"Drawing...\", end=' ')\n s = self.pixelsPerCell\n for h in range(self.height):\n for w in range(self.width):\n self.box[w][h] = self.canvas.create_rectangle(w*s, h*s, w*s+s, h*s+s,\n fill = \"gray\", outline = \"gray\")\n self.canvas.update()\n print(\"Done!\")", "def _box_face(image, face):\n draw = PIL.ImageDraw.Draw(image.image)\n draw.rectangle(face.as_box(), outline=\"yellow\")", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', slider_width='4cm'):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n self.channels_wid.single_slider.width = slider_width\n self.channels_wid.multiple_slider.width = slider_width\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.rgb_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.masked_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.sum_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_block_size_text, 
font_family, font_size,\n font_style, font_weight)\n format_font(self.glyph_use_negative_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.no_options_latex, font_family, font_size, font_style,\n font_weight)\n self.channels_wid.style(\n box_style=box_style, border_visible=False, text_box_style=None,\n text_box_background_colour=None, text_box_width=None,\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)", "def render(self, context):\n pygame.draw.rect(context, (255, 0, 0), self.box)", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def clip_border_segment(stream, style, width, side, border_box,\n border_widths=None, radii=None):\n bbx, bby, bbw, bbh = border_box\n (tlh, tlv), (trh, trv), (brh, brv), (blh, blv) = radii or 4 * ((0, 0),)\n bt, br, bb, bl = border_widths or 4 * (width,)\n\n def transition_point(x1, y1, x2, y2):\n \"\"\"Get the point use for border transition.\n\n The extra boolean returned is ``True`` if the point is in the padding\n box (ie. the padding box is rounded).\n\n This point is not specified. We must be sure to be inside the rounded\n padding box, and in the zone defined in the \"transition zone\" allowed\n by the specification. We chose the corner of the transition zone. It's\n easy to get and gives quite good results, but it seems to be different\n from what other browsers do.\n\n \"\"\"\n return (\n ((x1, y1), True) if abs(x1) > abs(x2) and abs(y1) > abs(y2)\n else ((x2, y2), False))\n\n def corner_half_length(a, b):\n \"\"\"Return the length of the half of one ellipsis corner.\n\n Inspired by [Ramanujan, S., \"Modular Equations and Approximations to\n pi\" Quart. J. Pure. Appl. Math., vol. 45 (1913-1914), pp. 
350-372],\n wonderfully explained by Dr Rob.\n\n https://mathforum.org/dr.math/faq/formulas/\n\n \"\"\"\n x = (a - b) / (a + b)\n return pi / 8 * (a + b) * (\n 1 + 3 * x ** 2 / (10 + sqrt(4 - 3 * x ** 2)))\n\n if side == 'top':\n (px1, py1), rounded1 = transition_point(tlh, tlv, bl, bt)\n (px2, py2), rounded2 = transition_point(-trh, trv, -br, bt)\n width = bt\n way = 1\n angle = 1\n main_offset = bby\n elif side == 'right':\n (px1, py1), rounded1 = transition_point(-trh, trv, -br, bt)\n (px2, py2), rounded2 = transition_point(-brh, -brv, -br, -bb)\n width = br\n way = 1\n angle = 2\n main_offset = bbx + bbw\n elif side == 'bottom':\n (px1, py1), rounded1 = transition_point(blh, -blv, bl, -bb)\n (px2, py2), rounded2 = transition_point(-brh, -brv, -br, -bb)\n width = bb\n way = -1\n angle = 3\n main_offset = bby + bbh\n elif side == 'left':\n (px1, py1), rounded1 = transition_point(tlh, tlv, bl, bt)\n (px2, py2), rounded2 = transition_point(blh, -blv, bl, -bb)\n width = bl\n way = -1\n angle = 4\n main_offset = bbx\n\n if side in ('top', 'bottom'):\n a1, b1 = px1 - bl / 2, way * py1 - width / 2\n a2, b2 = -px2 - br / 2, way * py2 - width / 2\n line_length = bbw - px1 + px2\n length = bbw\n stream.move_to(bbx + bbw, main_offset)\n stream.line_to(bbx, main_offset)\n stream.line_to(bbx + px1, main_offset + py1)\n stream.line_to(bbx + bbw + px2, main_offset + py2)\n elif side in ('left', 'right'):\n a1, b1 = -way * px1 - width / 2, py1 - bt / 2\n a2, b2 = -way * px2 - width / 2, -py2 - bb / 2\n line_length = bbh - py1 + py2\n length = bbh\n stream.move_to(main_offset, bby + bbh)\n stream.line_to(main_offset, bby)\n stream.line_to(main_offset + px1, bby + py1)\n stream.line_to(main_offset + px2, bby + bbh + py2)\n\n if style in ('dotted', 'dashed'):\n dash = width if style == 'dotted' else 3 * width\n if rounded1 or rounded2:\n # At least one of the two corners is rounded\n chl1 = corner_half_length(a1, b1)\n chl2 = corner_half_length(a2, b2)\n length = line_length + chl1 + chl2\n dash_length = round(length / dash)\n if rounded1 and rounded2:\n # 2x dashes\n dash = length / (dash_length + dash_length % 2)\n else:\n # 2x - 1/2 dashes\n dash = length / (dash_length + dash_length % 2 - 0.5)\n dashes1 = int(ceil((chl1 - dash / 2) / dash))\n dashes2 = int(ceil((chl2 - dash / 2) / dash))\n line = int(floor(line_length / dash))\n\n def draw_dots(dashes, line, way, x, y, px, py, chl):\n if not dashes:\n return line + 1, 0\n for i in range(0, dashes, 2):\n i += 0.5 # half dash\n angle1 = (\n ((2 * angle - way) + i * way * dash / chl) /\n 4 * pi)\n angle2 = (min if way > 0 else max)(\n ((2 * angle - way) + (i + 1) * way * dash / chl) /\n 4 * pi,\n angle * pi / 2)\n if side in ('top', 'bottom'):\n stream.move_to(x + px, main_offset + py)\n stream.line_to(\n x + px - way * px * 1 / tan(angle2), main_offset)\n stream.line_to(\n x + px - way * px * 1 / tan(angle1), main_offset)\n elif side in ('left', 'right'):\n stream.move_to(main_offset + px, y + py)\n stream.line_to(\n main_offset, y + py + way * py * tan(angle2))\n stream.line_to(\n main_offset, y + py + way * py * tan(angle1))\n if angle2 == angle * pi / 2:\n offset = (angle1 - angle2) / ((\n ((2 * angle - way) + (i + 1) * way * dash / chl) /\n 4 * pi) - angle1)\n line += 1\n break\n else:\n offset = 1 - (\n (angle * pi / 2 - angle2) / (angle2 - angle1))\n return line, offset\n\n line, offset = draw_dots(\n dashes1, line, way, bbx, bby, px1, py1, chl1)\n line = draw_dots(\n dashes2, line, -way, bbx + bbw, bby + bbh, px2, py2, chl2)[0]\n\n if line_length 
> 1e-6:\n for i in range(0, line, 2):\n i += offset\n if side in ('top', 'bottom'):\n x1 = max(bbx + px1 + i * dash, bbx + px1)\n x2 = min(bbx + px1 + (i + 1) * dash, bbx + bbw + px2)\n y1 = main_offset - (width if way < 0 else 0)\n y2 = y1 + width\n elif side in ('left', 'right'):\n y1 = max(bby + py1 + i * dash, bby + py1)\n y2 = min(bby + py1 + (i + 1) * dash, bby + bbh + py2)\n x1 = main_offset - (width if way > 0 else 0)\n x2 = x1 + width\n stream.rectangle(x1, y1, x2 - x1, y2 - y1)\n else:\n # 2x + 1 dashes\n stream.clip(even_odd=True)\n stream.end()\n dash = length / (\n round(length / dash) - (round(length / dash) + 1) % 2) or 1\n for i in range(0, int(round(length / dash)), 2):\n if side == 'top':\n stream.rectangle(bbx + i * dash, bby, dash, width)\n elif side == 'right':\n stream.rectangle(\n bbx + bbw - width, bby + i * dash, width, dash)\n elif side == 'bottom':\n stream.rectangle(\n bbx + i * dash, bby + bbh - width, dash, width)\n elif side == 'left':\n stream.rectangle(bbx, bby + i * dash, width, dash)\n stream.clip(even_odd=True)\n stream.end()", "def draw_boundary() -> None:\n # Upper edge\n print(rpipes.terminal.move_xy(0, 0), WBorder.HORIZONTAL * (rpipes.terminal.width - 1))\n\n # Left and Right edges\n for row in range(rpipes.terminal.height - 2):\n print(\n WBorder.VERTICAL,\n rpipes.terminal.move_right(rpipes.terminal.width - 4),\n WBorder.VERTICAL,\n )\n\n # Bottom edge\n print(\n rpipes.terminal.move_xy(0, rpipes.terminal.height - 2),\n WBorder.HORIZONTAL * (rpipes.terminal.width - 1),\n )\n\n # Top left corner\n print(rpipes.terminal.move_xy(0, 0) + WBorder.DOWN_AND_RIGHT)\n\n # Top right corner\n print(rpipes.terminal.move_xy(rpipes.terminal.width - 1, 0) + WBorder.DOWN_AND_LEFT)\n\n # Bottom left corner\n print(rpipes.terminal.move_xy(0, rpipes.terminal.height - 2) + WBorder.UP_AND_RIGHT)\n\n # Bottom right corner\n print(\n rpipes.terminal.move_xy(rpipes.terminal.width - 1, rpipes.terminal.height - 2)\n + WBorder.UP_AND_LEFT\n )", "def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)", "def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * (self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)", "def draw_bounding_box(self):\n # Gets the bounding box\n xmin, ymin, xmax, ymax = self.get_bounding_box()\n\n # Gets the actual coordinates\n width = xmax - xmin\n height = ymax - ymin\n center_x = xmin + (width)/2\n center_y = ymin + (height)/2\n\n arcade.draw_rectangle_outline(center_x, center_y, width, height, (255, 0, 0))", "def draw_box(img, box):\n draw_img = img.copy()\n cv2.polylines(draw_img, np.int32([box]), True, (255, 0, 0), 4)\n show(draw_img)", "def border(self):\n ...", "def drawbox(length, width, xstart, ystart):\n # curses takes y,x not x,y\n # Make the top left corner\n mvaddch(ystart, xstart, ACS_ULCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the top side\n for i in range(0, width - 1):\n mvaddch(ystart, xstart + 1 + i, ACS_HLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n #Make the top right corner\n mvaddch(ystart, xstart + width - 1, ACS_URCORNER, 
color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the left side\n for i in range(1, length):\n mvaddch(ystart + i, xstart, ACS_VLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # Draw the right side\n for i in range(1, length):\n mvaddch(ystart + i, xstart + width - 1, ACS_VLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # Make the bottom left corner\n mvaddch(ystart + length, xstart, ACS_LLCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n # # Draw the bottom side\n for i in range(0, width - 1):\n mvaddch(ystart + length, xstart + 1 + i, ACS_HLINE, color_pair(BORDER_COLOUR) | A_BOLD)\n # # Make the bottom left corner\n mvaddch(ystart + length, xstart + width - 1, ACS_LRCORNER, color_pair(BORDER_COLOUR) | A_BOLD)\n refresh()", "def _draw_outline(self) -> None:\n stroke = self.border_thickness\n\n # draw outline rectangle\n for _w in range(self.widget_width):\n for line in range(stroke):\n self._bitmap[_w, line] = 1\n self._bitmap[_w, self.widget_height - 1 - line] = 1\n for _h in range(self.widget_height):\n for line in range(stroke):\n self._bitmap[line, _h] = 1\n self._bitmap[self.widget_width - 1 - line, _h] = 1", "def print_upper_box_line():\n print_indentation()\n print(STYLES[parameters[\"Style\"]][\"Upper left corner\"], end=\"\")\n for _ in range(text_width_with_spaces):\n print(STYLES[parameters[\"Style\"]][\"Horizontal line\"], end=\"\")\n print(STYLES[parameters[\"Style\"]][\"Upper right corner\"])", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n for i in range(self.n_lines):\n format_font(self.latex_texts[i], font_family, font_size,\n font_style, font_weight)", "def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.play_stop_toggle, font_family, font_size, font_style,\n font_weight)\n format_font(self.play_options_toggle, font_family, font_size,\n font_style, font_weight)\n format_font(self.loop_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.interval_text, font_family, font_size, font_style,\n font_weight)\n if self.index_style == 'buttons':\n self.index_wid.style(\n box_style=None, border_visible=False, padding=0,\n margin='0.1cm', font_family=font_family, font_size=font_size,\n font_style=font_style, font_weight=font_weight)\n else:\n self.index_wid.style(\n box_style=None, border_visible=False, padding=0,\n margin='0.1cm', font_family=font_family, font_size=font_size,\n font_style=font_style, font_weight=font_weight)", "def style(self, box_style=None, border_visible=False, border_colour='black',\n 
border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.file_format_select, font_family, font_size, font_style,\n font_weight)\n format_font(self.dpi_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.orientation_dropdown, font_family, font_size,\n font_style, font_weight)\n format_font(self.papertype_select, font_family, font_size, font_style,\n font_weight)\n format_font(self.transparent_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.pad_inches_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.filename_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.overwrite_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.save_button, font_family, font_size, font_style,\n font_weight)\n self.facecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.edgecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n _format_font(self, font_family, font_size, font_style, font_weight)\n for i in range(self.n_lines):\n _format_font(self.latex_texts[i], font_family, font_size,\n font_style, font_weight)", "def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)", "def make_box(stdscr, margin:int):\n sh, sw = stdscr.getmaxyx()\n\n box = (\n (margin, margin),\n (sh-margin, sw-margin)\n )\n rectangle(stdscr, box[0][0], box[0][1], box[1][0], box[1][1])\n stdscr.refresh()\n return box", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n _format_font(self, font_family, font_size, font_style, font_weight)\n _format_font(self.play_stop_toggle, font_family, font_size, font_style,\n font_weight)\n _format_font(self.play_options_toggle, font_family, font_size,\n font_style, font_weight)\n _format_font(self.loop_checkbox, font_family, font_size, font_style,\n font_weight)\n _format_font(self.interval_text, font_family, font_size, font_style,\n font_weight)\n if self.index_style == 'buttons':\n self.index_wid.style(\n box_style=None, border_visible=False, padding=0,\n margin='0.15cm', font_family=font_family, font_size=font_size,\n font_style=font_style, font_weight=font_weight)\n else:\n self.index_wid.style(\n box_style=None, border_visible=False, padding=0,\n margin='0.15cm', font_family=font_family, font_size=font_size,\n font_style=font_style, 
font_weight=font_weight)", "def _draw_stream_dividers(self, painter):\n\t\tclip_left = self._history_left\n\t\tclip_right = self._history_left + self._history_width\n\n\t\trow = 0\n\t\tfor stream_name in self.streams:\n\t\t\t(x, y, w, h) = self._history_boundaries[stream_name]\n\n\t\t\tif row % 2 == 0:\n\t\t\t\tpainter.setPen(QtCore.Qt.lightGray)\n\t\t\t\tpainter.setBrush(QtGui.QBrush(self._history_background_color_alternate))\n\t\t\telse:\n\t\t\t\tpainter.setPen(QtCore.Qt.lightGray)\n\t\t\t\tpainter.setBrush(QtGui.QBrush(self._history_background_color))\n\t\t\tleft = max(clip_left, x)\n\t\t\tpainter.drawRect(left, y, min(clip_right - left, w), h)\n\t\t\trow += 1\n\t\tpainter.setBrush(self._default_brush)\n\t\tpainter.setPen(self._default_pen)", "def draw_box(\n draw,\n box,\n img_width,\n img_height,\n text=\"\",\n color=(255, 255, 0),\n) -> None:\n\n line_width = 3\n font_height = 8\n y_min, x_min, y_max, x_max = box\n (left, right, top, bottom) = (\n x_min * img_width,\n x_max * img_width,\n y_min * img_height,\n y_max * img_height,\n )\n draw.line(\n [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],\n width=line_width,\n fill=color,\n )\n if text:\n draw.text(\n (left + line_width, abs(top - line_width - font_height)), text, fill=color\n )" ]
[ "0.5705386", "0.5639399", "0.5570043", "0.556558", "0.55434954", "0.5383438", "0.53265554", "0.53187335", "0.52529013", "0.52422464", "0.5228069", "0.52273524", "0.52140033", "0.52086645", "0.51901776", "0.5178869", "0.5124615", "0.5115901", "0.5094592", "0.50910413", "0.50902665", "0.50793236", "0.50789803", "0.5052191", "0.50302035", "0.5027052", "0.50191164", "0.50103647", "0.49964708", "0.49944243" ]
0.69521314
0
Yield columns that have a rule drawn on the left.
def get_columns_with_rule(): skip_next = True for child in box.children: if child.style['column_span'] == 'all': skip_next = True elif skip_next: skip_next = False else: yield child
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_columnrules(self, index, M):\n return list(itertools.chain.from_iterable(\n [[[self.strip(index[0], index[1], i), self.strip(index[0], x, i)]\n for x in range(1, M+1) if x != index[1]]\n for i in range(1, M+1)]\n ))", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))", "def collapseLeft(self):\n retval = False\n for rStartInd in [i * self.col for i in range(self.row)]:\n cSlice = self.Range[rStartInd: rStartInd + self.col]\n lst = [self.get_cell(i) for i in cSlice]\n lst, tmp = self.collapseRow(lst)\n for i in range(self.col):\n self.set_cell(cSlice[i], lst[i])\n retval = retval or tmp\n return retval", "def columns(self):\n \n pass", "def positive_left_right_requirements(\n self,\n ) -> Tuple[Tuple[GriddedPerm, ...], Tuple[GriddedPerm, ...]]:\n left, right = [], []\n for (x, y) in self._tiling.active_cells:\n if self._fuse_row and y == self._row_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x, y + 1)))\n if not self._fuse_row and x == self._col_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x + 1, y)))\n return tuple(sorted(left)), tuple(sorted(right))", "def columns(self):\n headers = self.headers()\n for h in headers:\n col = self.get_column(h)\n yield col", "def get_cols_dummy():", "def columns(self, model=None):\n column = self.column(model=model)\n if column:\n yield column\n\n check = self.__value\n if not isinstance(check, (list, set, tuple)):\n check = (check,)\n\n for val in check:\n if isinstance(val, (Query, QueryCompound)):\n for col in val.columns(model):\n yield col", "def cols(self):\n for col in range(self.min_col, self.max_col+1):\n yield tuple('%s%d' % (get_column_letter(col), row)\n for row in range(self.min_row, self.max_row+1))", "def get_rowrules(self, index, M):\n return list(itertools.chain.from_iterable(\n [[[self.strip(index[0], index[1], i), self.strip(x, index[1], i)]\n for x in range(1, M+1) if x != index[0]]\n for i in range(1, M+1)]\n ))", "def __print_rules(self, left=0):\n\n for line in self.__rules:\n print((\" \" * left) + line, end=\"\")", "def makeLeftRightline(self):\r\n left_lines = []\r\n right_lines = []\r\n for i in self.lines:\r\n for x1,y1,x2,y2 in i:\r\n if x1 == x2:\r\n #Vertical Lines\r\n pass\r\n else:\r\n m = (y2 - y1) / (x2 - x1)\r\n c = y1 - m * x1\r\n if m < 0:\r\n left_lines.append((m,c))\r\n elif m >= 0:\r\n right_lines.append((m,c))\r\n return left_lines,right_lines", "def horizontals(self):\n horizontal_shifts = set(izip_longest(map(\n lambda i: i - self.x, range(self.board.length)), [], fillvalue=0))\n horizontal_shifts.discard((0, 0))\n return horizontal_shifts", "def get_cols_drop():", "def strip_left_cols(df, cols_to_strip):\n columnss = df.columns\n return df[columns[cols_to_strip:]]", "def getColumns(self, stripped):\n noheader = stripped[7:]\n entries = list(chunkify(noheader, 5))\n columns = []\n coverage = [False for x in range(self.rowLength)] #makes a string of bytes covered (0 or 1)\n for entry in entries:\n offset = int(entry[1])\n length = int(entry[2])\n assert offset+length<=self.rowLength, \"Error: entry length does not match row length in:\\n\"+ self.path +\".\"\n if coverage[offset] == True: # in the case where NMM entries are overlapping\n entry[0] = \"##OVERLAP WARNING## \" + entry[0]\n for x in range(offset, offset+length):\n coverage[x] = True #set bytes as covered.\n columns.append(NightmareEntry(entry))\n #at this point you have a list [True, 
True, False] or whatever\n fillerEntries = []\n count = 0\n for offset,val in enumerate(coverage):\n if val==False:\n count += 1\n if count == 1:\n fillerEntry = [\"##UNKNOWN##\",offset,1,\"HEXA\",\"NULL\"]\n fillerEntries.append(fillerEntry)\n else:\n fillerEntries[-1][2] = count\n else:\n count = 0\n for fillerEntry in fillerEntries:\n columns.append(NightmareEntry(fillerEntry))\n columns.sort(key=lambda col: col.offset) #sort columns by offset\n return columns", "def cells(self):\n return chain.from_iterable(self.cols)", "def columns(self):\r\n _columns = self.base_columns + self.veg_columns\r\n return _columns", "def get_left_side(grid):\n right = int(grid.width / 2)\n left_side = Grid(\n grid=grid, crop=Crop(left=0, right=right, top=0, bottom=0))\n left_side.find_grid_lines()\n left_side.vert_insert_line(0, distance=-80)\n left_side.get_cells()\n left_side.get_row_labels()\n return left_side", "def rotate_left(self):\n\n grid = Grid(self.width, self.height)\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n v = self.get(self.width - 1 - j, i)\n grid.set(i, j, v)\n\n return grid", "def unfuse_gridded_perm(\n self, gp: GriddedPerm, left_points: Optional[int] = None\n ) -> Iterator[GriddedPerm]:\n\n def stretch_above(p):\n return p if p[1] < self._row_idx else (p[0], p[1] + 1)\n\n def stretch_left(p):\n return p if p[0] < self._col_idx else (p[0] + 1, p[1])\n\n if self._fuse_row:\n stretch = stretch_above\n editable_pos_idx = [\n i for i, p in enumerate(gp.pos) if p[1] == self._row_idx\n ]\n editable_pos_idx.sort(key=lambda i: gp.patt[i])\n else:\n stretch = stretch_left\n editable_pos_idx = [\n i for i, p in enumerate(gp.pos) if p[0] == self._col_idx\n ]\n editable_pos_idx.sort()\n\n pos = list(map(stretch, gp.pos))\n if left_points is None or left_points == 0:\n yield gp.__class__(gp.patt, pos)\n if left_points == 0:\n return\n row_shift = int(self._fuse_row)\n col_shift = 1 - int(self._fuse_row)\n for left_points_so_far, i in enumerate(editable_pos_idx):\n pos[i] = (pos[i][0] - col_shift, pos[i][1] - row_shift)\n if left_points is None or left_points_so_far + 1 == left_points:\n yield gp.__class__(gp.patt, pos)\n if left_points_so_far + 1 == left_points:\n break", "def left(self, node):\r\n if self._col(node.count) > 0:\r\n return self.nodes[node.count - 1]\r\n else:\r\n return None", "def atoms_left(self):\r\n return self._board.get_atoms()", "def draw_next_column(self):\n self.xPos += self.XCOLUMNSKIP + self.XCOLUMNSEP\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]", "def dependent_cols():\n\n return ...", "def repair_column():\n turn_left()\n while front_is_clear():\n if no_beepers_present():\n put_beeper()\n move()\n if no_beepers_present():\n put_beeper()\n turn_around()\n while front_is_clear():\n move()\n turn_left()", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + 
self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states", "def dots_left(self):\n return (len(self.top_row) +\n len(self.bottom_row) +\n len(self.left_col) +\n len(self.right_col))" ]
[ "0.645149", "0.6057414", "0.586089", "0.56928515", "0.5675787", "0.5641196", "0.5573644", "0.55316836", "0.552581", "0.5522299", "0.54805315", "0.5416425", "0.53937966", "0.539047", "0.52966505", "0.52788615", "0.527391", "0.5270788", "0.52541006", "0.52484876", "0.52372336", "0.5230341", "0.5217386", "0.52089596", "0.5201697", "0.51953346", "0.5177903", "0.5177903", "0.51730514", "0.5164712" ]
0.7647242
0
Return the length of the half of one ellipsis corner. Inspired by [Ramanujan, S., "Modular Equations and Approximations to pi" Quart. J. Pure. Appl. Math., vol. 45 (1913-1914), pp. 350-372], wonderfully explained by Dr Rob.
def corner_half_length(a, b): x = (a - b) / (a + b) return pi / 8 * (a + b) * ( 1 + 3 * x ** 2 / (10 + sqrt(4 - 3 * x ** 2)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sq_footage(length, width):\n return length * width", "def pointlength(x):\n return 0.0", "def len_square(bound):\n\treturn (8 - 2 * bound)", "def dots_left(self):\n return (len(self.top_row) +\n len(self.bottom_row) +\n len(self.left_col) +\n len(self.right_col))", "def length(self):\n return self.length2 ** 0.5", "def get_interval_width(interval, points_per_inch=72):\n return (interval.end - interval.begin) / points_per_inch", "def width(self):\n return abs(self.end[0] - self.start[0])", "def edge_length(self):\n if self.edge_length_l is not None:\n return self.edge_length_l\n else:\n self.edge_length_l = (2 * self.radius * math.sin(math.pi / self.vert_count))\n return self.edge_length_l", "def _arc_min_length(height_in_units):\n return 2 * _arc_radius(height_in_units) * math.sin(_ANGLE)", "def leg_length(self, *args):\n i, j = args\n return Partition(list(self)).leg_length(i-1, j-1)", "def width(self) -> int:", "def width(self) -> int:", "def innerWidth(self):\n raise NotImplementedError", "def length(v):\n return math.sqrt(v[0]**2 + v[1]**2)", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def __length_hint__(self) -> 'Literal[28]':\n return 28", "def get_length_sqrd(self):\n return self.x**2 + self.y**2", "def calc_length(Ox,Oy,n) :\n R , l1, l2, l3 = Oy, Ox, 0, (n-Ox)\n theta1 = 2 * math.atan(l1/R)\n theta3 = 2 * math.atan(l3/R)\n theta2 = 2 * math.pi - (theta1 + theta3)\n l2 = R * theta2\n total_length = l1 + l2 +l3\n\n min_length = math.ceil(total_length)\n return min_length", "def focal_length(self):\n\n return self.f * self.diameter", "def get_bend_length(self):\n # The length of a parametric curve x(t) y(t) is Integral[ sqrt( (dx/dt)^2 + (dy/dt)^2 ), {t,0,t0}], which for a Fresnel curve, simplifies to just t0\n return 4 * self.t * self.scale_factor", "def getSideLength():\n side = float(input(\"How long do you want the side length?\"))\n return side", "def upper_hook_length(self, i, j, parameter):\n leg = self.circle_star().leg_length(i, j)\n arm = self.star().arm_length(i, j)\n return leg + parameter*(arm + 1)", "def get_max_length_diff_in_quad(points):\n leftmost, uppermost, rightmost, bottommost = (points[0, 0] for i in range(4))\n for point in points:\n x = point[0, 0]\n y = point[0, 1]\n if x < leftmost[0]:\n # Point is located on the left side of leftmost point\n leftmost = point[0]\n elif x > rightmost[0]:\n rightmost = point[0]\n elif y < uppermost[1]:\n uppermost = point[0]\n elif y > bottommost[1]:\n bottommost = point[0]\n\n length_diff = [cv2.norm(uppermost - leftmost),\n cv2.norm(rightmost - uppermost),\n cv2.norm(bottommost - rightmost),\n cv2.norm(leftmost - bottommost)]\n return np.max(length_diff)", "def getLength(self):\n return self.sideLength", "def _get_slice_len(s, axlen):\n if s.start is None:\n start = 0\n else:\n start = s.start\n if s.stop is None:\n stop = axlen\n else:\n stop = np.min([s.stop, axlen])\n if s.step is None:\n step = 1\n else:\n step = s.step\n\n return ((stop - 1 - start) // step) + 1", "def __length_hint__(self):\n return 2", "def _extra_width(self) -> int:\n width = 0\n if self.box and self.show_edge:\n width += 2\n if self.box:\n width += len(self.columns) - 1\n return width", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def lfn(self):\n if self.precision:\n return self.evaluations.exposedWing.edges[1].point1.z - self.evaluations.chordIntersected.edges[1].length\n else:\n return (self.acW + self.longPosW) / 2 # first guess for a faster evaluation", "def width(poly):\n num = 
len(poly) - 1\n if abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):\n return dist(poly[num], poly[0])\n elif abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):\n return dist(poly[1], poly[0])\n else: return max(dist(poly[num], poly[0]), dist(poly[1], poly[0]))" ]
[ "0.57467", "0.57435304", "0.56325287", "0.55781907", "0.54365695", "0.54319966", "0.5429056", "0.5394755", "0.53694063", "0.5361059", "0.53427887", "0.53427887", "0.5335277", "0.5334549", "0.5322909", "0.5317182", "0.53119785", "0.5297855", "0.529538", "0.5269954", "0.5258615", "0.52508014", "0.5239358", "0.5218922", "0.52111685", "0.52026826", "0.5200984", "0.5197747", "0.51809645", "0.5177462" ]
0.65680283
0
Draw borders of table cells when they collapse.
def draw_collapsed_borders(stream, table): row_heights = [ row.height for row_group in table.children for row in row_group.children] column_widths = table.column_widths if not (row_heights and column_widths): # One of the list is empty: don’t bother with empty tables return row_positions = [ row.position_y for row_group in table.children for row in row_group.children] column_positions = list(table.column_positions) grid_height = len(row_heights) grid_width = len(column_widths) assert grid_width == len(column_positions) # Add the end of the last column, but make a copy from the table attr. if table.style['direction'] == 'ltr': column_positions.append(column_positions[-1] + column_widths[-1]) else: column_positions.insert(0, column_positions[0] + column_widths[0]) # Add the end of the last row. No copy here, we own this list row_positions.append(row_positions[-1] + row_heights[-1]) vertical_borders, horizontal_borders = table.collapsed_border_grid if table.children[0].is_header: header_rows = len(table.children[0].children) else: header_rows = 0 if table.children[-1].is_footer: footer_rows = len(table.children[-1].children) else: footer_rows = 0 skipped_rows = table.skipped_rows if skipped_rows: body_rows_offset = skipped_rows - header_rows else: body_rows_offset = 0 original_grid_height = len(vertical_borders) footer_rows_offset = original_grid_height - grid_height def row_number(y, horizontal): # Examples in comments for 2 headers rows, 5 body rows, 3 footer rows if header_rows and y < header_rows + int(horizontal): # Row in header: y < 2 for vertical, y < 3 for horizontal return y elif footer_rows and y >= grid_height - footer_rows - int(horizontal): # Row in footer: y >= 7 for vertical, y >= 6 for horizontal return y + footer_rows_offset else: # Row in body: 2 >= y > 7 for vertical, 3 >= y > 6 for horizontal return y + body_rows_offset segments = [] def half_max_width(border_list, yx_pairs, vertical=True): result = 0 for y, x in yx_pairs: if ( (0 <= y < grid_height and 0 <= x <= grid_width) if vertical else (0 <= y <= grid_height and 0 <= x < grid_width) ): yy = row_number(y, horizontal=not vertical) _, (_, width, _) = border_list[yy][x] result = max(result, width) return result / 2 def add_vertical(x, y): yy = row_number(y, horizontal=False) score, (style, width, color) = vertical_borders[yy][x] if width == 0 or color.alpha == 0: return pos_x = column_positions[x] pos_y1 = row_positions[y] if y != 0 or not table.skip_cell_border_top: pos_y1 -= half_max_width(horizontal_borders, [ (y, x - 1), (y, x)], vertical=False) pos_y2 = row_positions[y + 1] if y != grid_height - 1 or not table.skip_cell_border_bottom: pos_y2 += half_max_width(horizontal_borders, [ (y + 1, x - 1), (y + 1, x)], vertical=False) segments.append(( score, style, width, color, 'left', (pos_x, pos_y1, 0, pos_y2 - pos_y1))) def add_horizontal(x, y): if y == 0 and table.skip_cell_border_top: return if y == grid_height and table.skip_cell_border_bottom: return yy = row_number(y, horizontal=True) score, (style, width, color) = horizontal_borders[yy][x] if width == 0 or color.alpha == 0: return pos_y = row_positions[y] shift_before = half_max_width(vertical_borders, [(y - 1, x), (y, x)]) shift_after = half_max_width( vertical_borders, [(y - 1, x + 1), (y, x + 1)]) if table.style['direction'] == 'ltr': pos_x1 = column_positions[x] - shift_before pos_x2 = column_positions[x + 1] + shift_after else: pos_x1 = column_positions[x + 1] - shift_after pos_x2 = column_positions[x] + shift_before segments.append(( score, style, width, 
color, 'top', (pos_x1, pos_y, pos_x2 - pos_x1, 0))) for x in range(grid_width): add_horizontal(x, 0) for y in range(grid_height): add_vertical(0, y) for x in range(grid_width): add_vertical(x + 1, y) add_horizontal(x, y + 1) # Sort bigger scores last (painted later, on top) # Since the number of different scores is expected to be small compared # to the number of segments, there should be little changes and Timsort # should be closer to O(n) than O(n * log(n)) segments.sort(key=operator.itemgetter(0)) for segment in segments: _, style, width, color, side, border_box = segment with stacked(stream): bx, by, bw, bh = border_box draw_line( stream, bx, by, bx + bw, by + bh, width, style, styled_color(style, color, side))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile_border(draw, r_s, r_e, c_s, c_e, color, border_size=TILE_BORDER_SIZE):\n for x in range(0, border_size):\n draw.rectangle([(c_s + x, r_s + x), (c_e - 1 - x, r_e - 1 - x)], outline=color)", "def _handle_border(self, i_row, i_col, adj_opp_cells, check_func, loc):\n check_func(i_row, i_col, adj_opp_cells, loc)\n #check_func will be any of the five functions listed below", "def _add_border(self):\n top = TopWallCell(self)\n left = SideWallCell(self, False)\n right = SideWallCell(self, True)\n for col in range(self._columns):\n self.cell_at(col, self._rows - 1, top)\n for row in range(self._rows):\n self.cell_at(0, row, left)\n self.cell_at(self._columns - 1, row, right)", "def update_border(self):\n # side borders\n for row_position in range(NUM_ROWS+BORDER_WIDTH*2):\n self.stdscr.addstr(row_position, 0, '|', curses.color_pair(7))\n self.stdscr.addstr(row_position, NUM_COLUMNS*BLOCK_WIDTH+1, '|', curses.color_pair(7))\n # top and bottom borders\n for column_position in range(NUM_COLUMNS*BLOCK_WIDTH+BORDER_WIDTH*2):\n self.stdscr.addstr(0, column_position, '-', curses.color_pair(7))\n self.stdscr.addstr(NUM_ROWS+1, column_position, '-', curses.color_pair(7))", "def border(self):\n ...", "def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * (self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)", "def _gen_table_style_lines(self):\n yield '.heatmap {border: none; border-collapse: collapse; border-spacing: 0}'\n yield '.heatmap td {padding: 0; margin: 0; font-family: monospace;}'", "def set_table_borders(self, element):\n # TODO actually obey the css style\n borders = {'all': {}}\n if element.attrib.has_key('style'):\n style = element.attrib['style']\n else:\n return borders\n borders['all']['color'] = 'black'\n borders['all']['space'] = '8'\n borders['all']['sz'] = '8'\n borders['all']['val'] = 'single'\n return borders", "def _draw_border(self, grid):\n # Left and Right border\n for i, x in enumerate(grid):\n x[0] = x[len(grid) - 1] = self._wall_color\n grid[i] = x\n\n # Top and Bottom border\n grid[0] = grid[len(grid) - 1] = [self._wall_color for _ in range(len(grid))]\n return grid", "def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val", "def check_border_cells(self):\n for row, col in self.land_cells:\n if row == 1 or row == self.unique_rows[-1] or col == 1 or col == self.unique_cols[-1]:\n raise ValueError(\"Only water cells may be border cells!\")", "def closingState(self):\n if self.__usingColgroup==False:\n self.__table.remove(self.__colgroup)\n if len(self.currentElement.items)==0:\n # Empty\n self.stateElement = None\n \"\"\" do clean up for IE border-right error for table that have right border. 
\"\"\"\n tableColumnsAndRows = self.__table.getLastChild()\n \n if tableColumnsAndRows is not None:\n tableRows = tableColumnsAndRows.getChildren()\n rightBorderValue = \"\"\n if tableRows != []:\n for row in tableRows:\n if row.name == \"tr\":\n cell = row.getLastChild() #Just check for last cell\n #for cell in cells:\n cellStyle = self.o.styles.getOooStyle(cell.getAttribute(\"style\").value)\n \n if cellStyle.type == \"\":\n rightBorderValue = \"\"\n break\n else:\n relist=cellStyle.type.split(\";\")\n for r in relist:\n try:\n match=r.split(\":\")\n #those converted border value that is less than 1 px will be replaced\n if match[0].strip() == \"border-right\":\n if match[1].strip() != \"none\":\n rightBorderValue = match[1] \n except: \n pass\n if rightBorderValue != \"\":\n #process here\n attr = self.__table.getAttribute(\"style\")\n if attr is not None:\n tableStyle = attr.value\n else:\n tableStyle = \"\"\n tableStyle += \"border: %s\" % rightBorderValue\n self.__table.setAttribute(\"style\", tableStyle)\n # Do not display tables that only contain empty cells\n # This is so that we can hide a table.\n allEmpty = True\n for row in self.__rows:\n for cell in row.cells:\n for c in cell.cellElement.getChildren():\n if c.type!=\"comment\":\n allEmpty = False\n break\n if allEmpty==False:\n break\n if allEmpty:\n self.rollback()", "def __table_column_style(self):\n cell = TableCellStyle()\n cell.set_bottom_border(1)\n self.default_style.add_cell_style('PLC-TableColumn', cell)", "def borders(self, borders):\n\n self._borders = borders", "def draw_border(stream, box):\n # We need a plan to draw beautiful borders, and that's difficult, no need\n # to lie. Let's try to find the cases that we can handle in a smart way.\n\n def get_columns_with_rule():\n \"\"\"Yield columns that have a rule drawn on the left.\"\"\"\n skip_next = True\n for child in box.children:\n if child.style['column_span'] == 'all':\n skip_next = True\n elif skip_next:\n skip_next = False\n else:\n yield child\n\n def draw_column_border():\n \"\"\"Draw column borders.\"\"\"\n columns = (\n isinstance(box, boxes.BlockContainerBox) and (\n box.style['column_width'] != 'auto' or\n box.style['column_count'] != 'auto'))\n if columns and box.style['column_rule_width']:\n border_widths = (0, 0, 0, box.style['column_rule_width'])\n for child in get_columns_with_rule():\n with stacked(stream):\n position_x = (child.position_x - (\n box.style['column_rule_width'] +\n box.style['column_gap']) / 2)\n border_box = (\n position_x, child.position_y,\n box.style['column_rule_width'], child.height)\n clip_border_segment(\n stream, box.style['column_rule_style'],\n box.style['column_rule_width'], 'left', border_box,\n border_widths)\n draw_rect_border(\n stream, border_box, border_widths,\n box.style['column_rule_style'], styled_color(\n box.style['column_rule_style'],\n get_color(box.style, 'column_rule_color'), 'left'))\n\n # The box is hidden, easy.\n if box.style['visibility'] != 'visible':\n draw_column_border()\n return\n\n widths = [getattr(box, f'border_{side}_width') for side in SIDES]\n\n # No border, return early.\n if all(width == 0 for width in widths):\n draw_column_border()\n return\n\n colors = [get_color(box.style, f'border_{side}_color') for side in SIDES]\n styles = [\n colors[i].alpha and box.style[f'border_{side}_style']\n for (i, side) in enumerate(SIDES)]\n\n # The 4 sides are solid or double, and they have the same color. 
Oh yeah!\n # We can draw them so easily!\n if set(styles) in (set(('solid',)), set(('double',))) and (\n len(set(colors)) == 1):\n draw_rounded_border(stream, box, styles[0], colors[0])\n draw_column_border()\n return\n\n # We're not smart enough to find a good way to draw the borders :/. We must\n # draw them side by side.\n for side, width, color, style in zip(SIDES, widths, colors, styles):\n if width == 0 or not color:\n continue\n with stacked(stream):\n clip_border_segment(\n stream, style, width, side, box.rounded_border_box()[:4],\n widths, box.rounded_border_box()[4:])\n draw_rounded_border(\n stream, box, style, styled_color(style, color, side))\n\n draw_column_border()", "def border(self):\n sel=self.ch_border.isChecked()\n for i in [ self.sb_border_width, self.cb_style, self.b_color ]:\n i.setEnabled(sel)", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def _draw_outline(self) -> None:\n stroke = self.border_thickness\n\n # draw outline rectangle\n for _w in range(self.widget_width):\n for line in range(stroke):\n self._bitmap[_w, line] = 1\n self._bitmap[_w, self.widget_height - 1 - line] = 1\n for _h in range(self.widget_height):\n for line in range(stroke):\n self._bitmap[line, _h] = 1\n self._bitmap[self.widget_width - 1 - line, _h] = 1", "def redrawVisible(self, event=None, callback=None):\n\n # print 'redraw'\n model = self.model\n self.rows = self.model.getRowCount()\n self.cols = self.model.getColumnCount()\n if self.cols == 0 or self.rows == 0:\n self.delete('entry')\n self.delete('rowrect')\n self.delete('currentrect')\n return\n self.tablewidth = (self.cellwidth) * self.cols\n self.configure(bg=self.cellbackgr)\n self.set_colPositions()\n\n # are we drawing a filtered subset of the recs?\n if self.filtered == True and self.model.filteredrecs != None:\n self.rows = len(self.model.filteredrecs)\n self.delete('colrect')\n\n self.rowrange = list(range(0, self.rows))\n self.configure(scrollregion=(0, 0, self.tablewidth + self.x_start,\n self.rowheight * self.rows + 10))\n\n x1, y1, x2, y2 = self.getVisibleRegion()\n # print x1, y1, x2, y2\n startvisiblerow, endvisiblerow = self.getVisibleRows(y1, y2)\n self.visiblerows = list(range(startvisiblerow, endvisiblerow))\n startvisiblecol, endvisiblecol = self.getVisibleCols(x1, x2)\n self.visiblecols = list(range(startvisiblecol, endvisiblecol))\n\n self.drawGrid(startvisiblerow, endvisiblerow)\n align = self.align\n self.delete('fillrect')\n for row in self.visiblerows:\n if callback != None:\n callback()\n for col in self.visiblecols:\n colname = model.getColumnName(col)\n bgcolor = model.getColorAt(row, col, 'bg')\n fgcolor = model.getColorAt(row, col, 'fg')\n text = model.getValueAt(row, col)\n self.draw_Text(row, col, text, fgcolor, align)\n if bgcolor != None:\n self.draw_rect(row, col, color=bgcolor)\n\n # self.drawSelectedCol()\n self.tablecolheader.redraw()\n self.tablerowheader.redraw()\n # self.setSelectedRow(self.currentrow)\n self.drawSelectedRow()\n self.draw_selected_rect(self.currentrow, self.currentcol)\n # print self.multiplerowlist\n\n if len(self.multiplerowlist) > 1:\n self.tablerowheader.drawSelectedRows(self.multiplerowlist)\n self.drawMultipleRows(self.multiplerowlist)\n self.drawMultipleCells()\n return", "def 
drawBorder(self,color,x1,y1,x2,y2,thick):\n self.drawRect(color,x1,y1,x2,y1+thick)\n self.drawRect(color,x1,y1,x1+thick,y2)\n self.drawRect(color,x2-thick,y1,x2,y2)\n self.drawRect(color,x1,y2-thick,x2,y2)", "def cells_off(self):\n self.plotter.cells_off(self.ax)\n self.fig.canvas.draw()", "def draw_borders(img):\n ret = img.copy()\n ret[0, :] = GRAY # top\n ret[-1, :] = GRAY # bottom\n ret[:, 0] = GRAY # left\n ret[:, -1] = GRAY # right\n return ret", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def get_borders(self):\r\n return (self.tiles[0][0], self.tiles[-1][-1])", "def draw(self):\n if self.master != None :\n fill = self.fill\n #fill = Cell.FILLED_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n\n #if not self.fill:\n # fill = Cell.EMPTY_COLOR_BG\n # outline = Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def draw_grid(self):\n buf = self.__hbar\n for rInd in range(self.row):\n line = '\\t|'\n for cInd in range(self.col):\n this = ((rInd * self.col) + cInd)\n cell = self.get_cell(this)\n if not cell:\n line += '%s|' % ' '.center(5)\n else:\n if this == self.new_cell:\n tmp = green(str(cell).center(5))\n else:\n tmp = str(cell).center(5)\n line += '%s|' % tmp\n buf += line + '\\n' + self.__hbar\n print(buf)", "def borders(self):\n border_left = pm.Segment(self.space.static_body, (-5, 0), (-5, self.screen_height), 10)\n border_right = pm.Segment(self.space.static_body, (self.screen_width + 5, 0),\n (self.screen_width + 5, self.screen_height), 10)\n border_top = pm.Segment(self.space.static_body, (0, self.screen_height + 5),\n (self.screen_width, self.screen_height + 5), 10)\n border_bottom = pm.Segment(self.space.static_body, (0, 0), (self.screen_width, 0),\n self.screen_height * 0.1)\n border_bottom.friction = TERRAIN_FRICTION # Set the bottom border friction\n border_bottom.color = DARK_GREY # Set the bottom border color\n\n # Set the collision types so that the collision handlers check for them\n border_top.collision_type = 4\n border_left.collision_type = 4\n border_right.collision_type = 4\n border_bottom.collision_type = 4\n self.space.add(border_left, border_right, border_top, border_bottom) # Add the borders to the Pymunk space", "def _render_borders(self):\n\n # XXX\n # - read the old glBlendFunc value and restore it if neccessary.\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n self.border_program.use()\n for shape_object_id, instances in self._instances.items():\n self._shape_vaos[shape_object_id].bind()\n for instance in instances:\n border_size = instance.border['size']\n if len(instance.border) > 0:\n glEnable(GL_BLEND)\n # XXX\n # - cache the modelview matrix\n modelview = ModelView()\n modelview.set_scaling(instance.size[0]+2*border_size, instance.size[1]+2*border_size)\n 
modelview.set_position(instance.position[0]-border_size, instance.position[1]-border_size)\n self.border_program.uniform('mat_modelview', modelview.mat4)\n self.border_program.uniform('color', instance.border['color'])\n glDrawArrays(GL_TRIANGLES, 0, 6)\n\n glDisable(GL_BLEND)\n # XXX\n # - cache the modelview matrix\n modelview = ModelView()\n modelview.set_scaling(*instance.size)\n modelview.set_position(*instance.position)\n self.border_program.uniform('color', [0,0,0,0])\n self.border_program.uniform('mat_modelview', modelview.mat4)\n glDrawArrays(GL_TRIANGLES, 0, 6)\n\n self._shape_vaos[shape_object_id].unbind()\n self.border_program.unuse()\n\n glEnable(GL_BLEND)", "def drawBorder(self):\n if self._focused:\n self._window.attron(curses.A_BOLD)\n else:\n self._window.attroff(curses.A_BOLD)\n self._window.border()\n self._window.addstr(0, 1, self.__title)\n self._window.attroff(curses.A_BOLD)", "def _edit_border_tiles(self, vmf: VMF, seg_min: Vec, seg_max: Vec, border: bool, blacken: bool) -> None:\n up = abs(self.up_axis)\n forward = (seg_max - seg_min).norm()\n norm_dir = self.normal().axis()\n\n tiledefs_up: list[tiling.TileDef] = []\n tiledefs_dn: list[tiling.TileDef] = []\n\n overlay_len = int((seg_max - seg_min).mag())\n\n # We need to snap the axis normal_axis to the grid, since it could\n # be forward or back.\n min_pos = seg_min.copy()\n min_pos[norm_dir] = min_pos[norm_dir] // 128 * 128 + 64\n\n u_ax, v_ax = Vec.INV_AXIS[up.axis()]\n side_dir = Vec.dot(abs(Vec.cross(up, forward)), seg_min - min_pos)\n side_ind = round((side_dir + 48) / 32, 2) # 0/1/2/3 for the center of tiles.\n # 4.5 -> [4, 5] and 4 -> [4].\n pos_iter = sorted({round(side_ind - 0.25), round(side_ind + 0.25)})\n if u_ax == forward.axis():\n uv_pos = [\n (u, v)\n for u in range(4)\n for v in pos_iter\n ]\n elif v_ax == forward.axis():\n uv_pos = [\n (u, v)\n for u in pos_iter\n for v in range(4)\n ]\n else: # Should be impossible?\n uv_pos = []\n\n for offset in range(64, overlay_len, 128):\n # Each position on top or bottom, inset 64 from each end.\n # First check if the tiles themselves are present, then check if any of the\n # subtiles are present - blackening on the way if required.\n pos = min_pos + offset * forward\n tile_cat = []\n try:\n top_tile = tiling.TILES[\n (pos + 128 * up).as_tuple(),\n (-up).as_tuple()\n ]\n except KeyError:\n pass\n else:\n tile_cat.append((tiledefs_up, top_tile))\n try:\n btm_tile = tiling.TILES[\n (pos - 128 * up).as_tuple(),\n up.as_tuple()\n ]\n except KeyError:\n pass\n else:\n tile_cat.append((tiledefs_dn, btm_tile))\n for tiledefs, tile in tile_cat:\n found = False\n for u, v in uv_pos:\n subtile = tile[u, v]\n if subtile.is_tile:\n found = True\n if blacken:\n tile[u, v] = subtile.as_black\n if found:\n tiledefs.append(tile)\n\n if not border or (not tiledefs_up and not tiledefs_dn):\n return\n\n overlay_thickness = options.get(int, 'fizz_border_thickness')\n overlay_repeat = options.get(int, 'fizz_border_repeat')\n flip_uv = options.get(bool, 'fizz_border_vertical')\n\n if flip_uv:\n u_rep = 1.0\n v_rep = overlay_len / overlay_repeat\n else:\n u_rep = overlay_len / overlay_repeat\n v_rep = 1.0\n\n cent_pos = (seg_min + seg_max) / 2\n\n if tiledefs_up:\n over = srctools.vmf.make_overlay(\n vmf,\n normal=-up,\n origin=cent_pos + 64 * up,\n uax=forward * overlay_len,\n vax=Vec.cross(up, forward) * overlay_thickness,\n material=texturing.SPECIAL.get(cent_pos + 64 * up, 'fizz_border'),\n surfaces=[],\n u_repeat=u_rep,\n v_repeat=v_rep,\n swap=flip_uv,\n )\n for tile in 
tiledefs_up:\n tile.bind_overlay(over)\n\n if tiledefs_dn:\n over = srctools.vmf.make_overlay(\n vmf,\n normal=up,\n origin=cent_pos - 64 * up,\n uax=forward * overlay_len,\n vax=Vec.cross(-up, forward) * overlay_thickness,\n material=texturing.SPECIAL.get(cent_pos - 64 * up, 'fizz_border'),\n surfaces=[],\n u_repeat=u_rep,\n v_repeat=v_rep,\n swap=flip_uv,\n )\n for tile in tiledefs_dn:\n tile.bind_overlay(over)" ]
[ "0.61329514", "0.61080414", "0.6092054", "0.6007161", "0.5973623", "0.59045523", "0.5830328", "0.58236414", "0.577159", "0.57643205", "0.5728255", "0.5599663", "0.55689305", "0.5513478", "0.54590344", "0.5346918", "0.53133273", "0.5308185", "0.5281155", "0.5266912", "0.5189118", "0.5177626", "0.5172572", "0.51620716", "0.5156286", "0.5124461", "0.51236975", "0.51047635", "0.5097982", "0.50858575" ]
0.6858842
0
Draw text-decoration of ``textbox`` to a ``document.Stream``.
def draw_text_decoration(stream, textbox, offset_x, offset_y, thickness, color): draw_line( stream, textbox.position_x, textbox.position_y + offset_y, textbox.position_x + textbox.width, textbox.position_y + offset_y, thickness, textbox.style['text_decoration_style'], color, offset_x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_text(stream, textbox, offset_x, text_overflow, block_ellipsis):\n # Pango crashes with font-size: 0\n assert textbox.style['font_size']\n\n if textbox.style['visibility'] != 'visible':\n return\n\n text_decoration_values = textbox.style['text_decoration_line']\n text_decoration_color = textbox.style['text_decoration_color']\n if text_decoration_color == 'currentColor':\n text_decoration_color = textbox.style['color']\n if 'overline' in text_decoration_values:\n thickness = textbox.pango_layout.underline_thickness\n offset_y = (\n textbox.baseline - textbox.pango_layout.ascent + thickness / 2)\n draw_text_decoration(\n stream, textbox, offset_x, offset_y, thickness,\n text_decoration_color)\n if 'underline' in text_decoration_values:\n thickness = textbox.pango_layout.underline_thickness\n offset_y = (\n textbox.baseline - textbox.pango_layout.underline_position +\n thickness / 2)\n draw_text_decoration(\n stream, textbox, offset_x, offset_y, thickness,\n text_decoration_color)\n\n x, y = textbox.position_x, textbox.position_y + textbox.baseline\n stream.set_color_rgb(*textbox.style['color'][:3])\n stream.set_alpha(textbox.style['color'][3])\n\n textbox.pango_layout.reactivate(textbox.style)\n stream.begin_text()\n emojis = draw_first_line(\n stream, textbox, text_overflow, block_ellipsis, x, y)\n stream.end_text()\n\n draw_emojis(stream, textbox.style['font_size'], x, y, emojis)\n\n if 'line-through' in text_decoration_values:\n thickness = textbox.pango_layout.strikethrough_thickness\n offset_y = (\n textbox.baseline - textbox.pango_layout.strikethrough_position)\n draw_text_decoration(\n stream, textbox, offset_x, offset_y, thickness,\n text_decoration_color)\n\n textbox.pango_layout.deactivate()", "def text_draw(self, x, y, text, style={}):", "def add_textbox(ax, textstr):\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(0.25, 0.90, textstr, transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=props)", "def draw_text(self, text, origin, font=cv2.FONT_HERSHEY_SIMPLEX, text_scale=0.7, text_color=(255,0,0), thickness=2):\r\n cv2.putText(self.image, text, origin, font, text_scale, text_color, thickness)", "def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence\n text_obj = font.render(text, True, color)\n text_rect = text_obj.get_rect()\n text_rect.center = (x, y)\n surface.blit(text_obj, text_rect)", "def text(self, text, x, y, height, width):\n cook = cookie()\n t = Text(cook, self)\n t.text = text\n self.call('text', cook, text, x, y, height, width)\n return t", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def _draw_text_outline(img, font, text, fill_color, line_color, origin):\n draw = ImageDraw.Draw(img)\n # set to render text unaliased (https://mail.python.org/pipermail/image-sig/2005-August/003497.html)\n draw.fontmode = \"1\"\n # correct for font offset\n x_offset, y_offset = font.getoffset(text)\n # These values were tweaked by hand to get a better centered text\n x, y = (origin[0]-2*x_offset, origin[1]-y_offset//2)\n origin = (x, y)\n # draw border\n draw.text((x - 1, y), text, font=font, fill=line_color)\n draw.text((x + 1, y), text, font=font, fill=line_color)\n draw.text((x, y - 1), text, font=font, fill=line_color)\n draw.text((x, y + 1), text, 
font=font, fill=line_color)\n # draw text over it\n draw.text(origin, text, font=font, fill=fill_color)", "def _draw_text_outline(img: Image.Image, font: ImageFont.ImageFont, text: str, fill_color: Color, line_color: Color, origin: Tuple[int, int]) -> None:\n draw = ImageDraw.Draw(img)\n # set to render text unaliased (https://mail.python.org/pipermail/image-sig/2005-August/003497.html)\n draw.fontmode = \"1\"\n # correct for font offset\n x_offset, y_offset, _, _ = font.getbbox(text)\n # These values were tweaked by hand to get a better centered text\n x, y = (origin[0]-2*x_offset, origin[1]-y_offset//2)\n origin = (x, y)\n # draw border\n draw.text((x - 1, y), text, font=font, fill=line_color)\n draw.text((x + 1, y), text, font=font, fill=line_color)\n draw.text((x, y - 1), text, font=font, fill=line_color)\n draw.text((x, y + 1), text, font=font, fill=line_color)\n # draw text over it\n draw.text(origin, text, font=font, fill=fill_color)", "def add_text(self, x, y, text, style=None):\n style = self.__prepare_style(style, ' ')\n for i, c in enumerate(text):\n if self.check_coord_in_range(x + i, y):\n text_style = Style(c, style.fg_color, style.bg_color, style.font_style)\n self.canvas[y][x + i] = text_style", "def DocumentAppendStyledText(self, wave_id, wavelet_id, blip_id, text, style):\n raise NotImplementedError()", "def DrawText(*args, **kwargs):\n return _gdi_.PseudoDC_DrawText(*args, **kwargs)", "def DrawText(*args, **kwargs):\n return _gdi_.DC_DrawText(*args, **kwargs)", "def __init__(self, x, y, align_bottom_left, text, font, color):\n super(TextBox, self).__init__(x, y)\n\n self.align_bottom_left = align_bottom_left\n self.__text = text\n self.font = font\n self.color = color\n self.original_x = x\n self.original_y = y\n self.image = None\n self.image_rect = None\n\n self.set_text(text)", "def text(self, tft, oled, text, wait=0, start_clear=False, end_clear=False):\n if start_clear:\n self.clear(tft, oled)\n oled.text((5, 5), text, tft.WHITE, sysfont, 1)\n sleep(wait)\n if end_clear:\n self.clear(tft, oled)", "def add_text(self, text, size=10, x=0.5, y=0.5, ha='center', va='center', bbox='default', **kwargs):\n if bbox == 'default':\n bbox = {'boxstyle': 'square', 'fc': 'none'}\n\n return self.ax.text(x=x, y=y, s=text, size=size, ha=ha, va=va, bbox=bbox, **kwargs)", "def paintText(self, text):\n return '@paint '+text * 2", "def draw_text(self, text, position, font_size, font_color):\n font_color = check_color(font_color)\n STtext.text(self.canvas, text, position, font_size, font_color)", "def echo(self, text, who=None, prefix=None):\n tb = self._textbox\n tb.configure(state=Tix.NORMAL)\n colors = self.colors or self.default_colors\n for line in text.splitlines():\n tb.insert(Tix.END, time.strftime(\"[%H:%M:%S] \"))\n if prefix is not None:\n tb.insert(Tix.END, prefix)\n if who is not None:\n tb.insert(Tix.END, \"<%s>\" % who,\n \"color_%d\" % (hash(who) % len(colors)))\n else:\n tb.insert(Tix.END, \"***\")\n tb.insert(Tix.END, \" \")\n for (part, tag) in self._format_message(line):\n if tag is None:\n tb.insert(Tix.END, part)\n else:\n tb.insert(Tix.END, part, tag)\n tb.insert(Tix.END, \"\\n\")\n tb.configure(state=Tix.DISABLED)\n t, b = tb.yview()\n h = b - t\n tb.yview_moveto(1 - h)", "def draw_text(\n self,\n text: str,\n transform: Matrix44,\n properties: Properties,\n cap_height: float,\n ) -> None:\n raise NotImplementedError", "def drawText(text, font, surface, x, y, textcolour):\r\n textobj = font.render(text, 1, textcolour)\r\n textrect = textobj.get_rect()\r\n 
textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)", "def underline(self, underline):\n\n self._underline = underline", "def draw(self):\n if self.dirty:\n self._render()\n for text in self.text_lines:\n text.draw()", "def DrawText(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawText(*args, **kwargs)", "def underline(self, underline=True, doubleDot=False):\n if type(underline) is not bool:\n raise ValueError('underline must be True or False')\n elif type(doubleDot) is not bool:\n raise ValueError('doubleDot must be True or False')\n elif self._usePrintMode:\n self._textUnderline = underline\n self._updatePrintMode()\n elif underline and doubleDot:\n self._write(self.__class__.__ESC + '-\\x02')\n elif underline:\n self._write(self.__class__.__ESC + '-\\x01')\n else:\n self._write(self.__class__.__ESC + '-\\x00')", "def draw_text(display, font_name, text, size, color, x, y):\n font = pg.font.Font(font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n display.blit(text_surface, text_rect)", "def draw_text(self, text, i, j, **params):", "def SetConsoleTextAttribute(stream_id, attrs):\n handle = handles[stream_id]\n return windll.kernel32.SetConsoleTextAttribute(handle, attrs)", "def create_text_box(self, box_pos, text_font):\n self.textBox = tk.Text(self.top, height=1, width=17,\n font=('Helvetica', text_font))\n self.textBox.grid(row=box_pos[0], column=box_pos[1],\n columnspan=box_pos[2], rowspan=box_pos[3])", "def write(self, text: str):\n if self.color:\n text_color = self.edit.textColor()\n self.edit.setTextColor(text_color)\n if self.out:\n self.out.write(text)\n self.edit.moveCursor(QtGui.QTextCursor.End)\n self.edit.insertPlainText(text)" ]
[ "0.71900946", "0.61658967", "0.56317216", "0.56239617", "0.5589885", "0.5569843", "0.54945374", "0.53644043", "0.53160083", "0.52535385", "0.524488", "0.5234661", "0.52239835", "0.5214129", "0.5204475", "0.5171991", "0.51597446", "0.5150328", "0.51423484", "0.5134784", "0.51300144", "0.5115629", "0.5099984", "0.5069906", "0.50297064", "0.5028549", "0.5026028", "0.501949", "0.50111413", "0.49883193" ]
0.8075535
0
Instantiate the scope by keys. e.g. PullRequest.init_by_keys(organization='octocat', repository='Hello-World', number=1) -> PullRequest(organization='octocat', repository='Hello-World', number=1)
def init_by_keys(cls, **query): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self, keys: List[str]):", "def build(keys: List[str]):\n api = API()\n api.build(*keys)", "def __init__(self, keys_to_track):\r\n self.keys_to_track = keys_to_track\r\n self.tracker = {}\r\n for key_to_track in self.keys_to_track:\r\n self.tracker[key_to_track] = {}", "def __init__(self, termname, keys, ordinal=False):\n \n self.keys = list(set(keys))\n self.keys.sort()\n self._name = termname\n self.termname = termname\n self.ordinal = ordinal\n\n if self.ordinal:\n name = self.name\n else:\n name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]\n\n term.__init__(self, name, termname=self.termname, func=self.get_columns)", "def __init__(self, **kwargs):\n\n for name, attr in kwargs.items():\n setattr(self, name, attr)\n\n if 'scope' in kwargs.keys():\n self.is_main = True\n\n # collect all fields from all configs and regular kwargs\n fields = (_get_fields(attr) for name, attr in\n sorted(kwargs.items(), key=itemgetter(0))\n if not name == \"scope\")\n\n self.identifier_fields = sum(fields, [])", "def __init__(self, keys, values):\n self.keys = keys\n self.values = values", "def __init__(self, scope=None, expires_at=None, scope_details=None):\n\n self._scope = None\n self._expires_at = None\n self._scope_details = None\n\n if scope is not None:\n self.scope = scope\n if expires_at is not None:\n self.expires_at = expires_at\n if scope_details is not None:\n self.scope_details = scope_details", "def __init__(self, path, number_keys=1):\n\n self.path = path\n self.keyring = []\n if os.path.exists(path):\n self.keyring = read_keys(path)\n else:\n for n in range(number_keys):\n key = generate_key(generate_random())\n self.keyring.append(key)\n write_keys(path, self.keyring)", "def __init__(self, key=None):\n self.key = key", "def __init__(self, terms, *interfaces):\n self.by_value = {}\n self.by_token = {}\n self._terms = []\n for term in terms:\n if term.value in self.by_value:\n raise ValueError(\n 'term values must be unique: %s' % repr(term.value))\n if term.token in self.by_token:\n raise ValueError(\n 'term tokens must be unique: %s' % repr(term.token))\n self.by_value[term.value] = term\n self.by_token[term.token] = term\n self._terms.append(term)\n if interfaces:\n directlyProvides(self, *interfaces)", "def __init__(self, key):\n self.key = key", "def __init__(self):\n self._keys = []\n self._sortKeys = []", "def __init__(self, mods, key):\n self.mods = mods\n self.key = key", "def __init__(self, termname, keys, ordinal=False):\n\n if not ordinal:\n self.keys = list(set(keys))\n self.keys.sort()\n else:\n self.keys = keys\n if len(set(keys)) != len(list(keys)):\n raise ValueError('keys for ordinal Factor should be unique, in increasing order')\n self._name = termname\n self.termname = termname\n self.ordinal = ordinal\n\n if self.ordinal:\n name = self.termname\n else:\n name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]\n\n Term.__init__(self, name, termname=self.termname, func=self.get_columns)", "def __init__(self, **fields):\r\n \r\n self._by_number = []\r\n self._names = []\r\n self._by_name = {}\r\n self._numbers = {}\r\n \r\n for name in sorted(fields.keys()):\r\n self.add(name, fields[name])", "def __init__(self, k_spec_dict):\n self.k_spec_dict = k_spec_dict\n # list of Atomic_2_Global_Descriptor objections\n self.engines = {}\n\n self.bind()", "def gen_keys():", "def __init__(__self__, *,\n key: pulumi.Input[str],\n name: pulumi.Input[str],\n namespace: pulumi.Input[str]):\n pulumi.set(__self__, \"key\", key)\n 
pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"namespace\", namespace)", "def remote_pullNamespace(*keys):", "def fromkeys(cls, keys, default=None):\n return cls([(k, default) for k in keys])", "def __init__(__self__, *,\n key: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def __init__(__self__, *,\n key: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def __init__(__self__, *,\n key: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def __init__(__self__, *,\n key: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def __init__(__self__, *,\n key: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def init(x_in):\n global public_keys, secret_keys, x\n x = func.get_bits(x_in)\n\n public_keys, secret_keys = [], []\n\n elgamal.init_g_p_q()\n for i in range(3):\n create_keys(i)", "def __init__(self, *args: Union[List[AtomKey], HKT], **kwargs: str) -> None:\n ...", "def __init__(__self__, *,\n api_groups: Optional[Sequence[str]] = None,\n api_versions: Optional[Sequence[str]] = None,\n operations: Optional[Sequence[str]] = None,\n resources: Optional[Sequence[str]] = None,\n scope: Optional[str] = None):\n if api_groups is not None:\n pulumi.set(__self__, \"api_groups\", api_groups)\n if api_versions is not None:\n pulumi.set(__self__, \"api_versions\", api_versions)\n if operations is not None:\n pulumi.set(__self__, \"operations\", operations)\n if resources is not None:\n pulumi.set(__self__, \"resources\", resources)\n if scope is not None:\n pulumi.set(__self__, \"scope\", scope)", "def __init__(__self__, *,\n api_groups: Optional[Sequence[str]] = None,\n api_versions: Optional[Sequence[str]] = None,\n operations: Optional[Sequence[str]] = None,\n resources: Optional[Sequence[str]] = None,\n scope: Optional[str] = None):\n if api_groups is not None:\n pulumi.set(__self__, \"api_groups\", api_groups)\n if api_versions is not None:\n pulumi.set(__self__, \"api_versions\", api_versions)\n if operations is not None:\n pulumi.set(__self__, \"operations\", operations)\n if resources is not None:\n pulumi.set(__self__, \"resources\", resources)\n if scope is not None:\n pulumi.set(__self__, \"scope\", scope)", "def fromkeys(\n cls, \n keys: Sequence[Hashable], \n value: Any, \n **kwargs: Any) -> Lexicon:\n return cls(contents = dict.fromkeys(keys, value), **kwargs)" ]
[ "0.6308261", "0.60055834", "0.55877304", "0.55266637", "0.54057276", "0.5345015", "0.52308273", "0.5219602", "0.5178745", "0.5178331", "0.51643896", "0.51558304", "0.51527977", "0.5145405", "0.5113519", "0.50983435", "0.5097046", "0.50778776", "0.50524044", "0.50505924", "0.50321215", "0.50321215", "0.50321215", "0.50321215", "0.50321215", "0.5024885", "0.5018596", "0.50026983", "0.50026983", "0.4996495" ]
0.6373544
0
Return whether this endpoint scope is a singleton or not.
def is_singleton_scope(cls): return not cls.primary_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_Singleton(self):\n return self.size == 1", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def has_request_scope(self):\n return self.request_scope", "def private_instance(self) -> bool:\n return pulumi.get(self, \"private_instance\")", "def is_shared(self):\n return self._tag == 'shared'", "def is_configured(self):\n return self._session is not None", "def is_singleton(self) -> bool:\n return len(self.elements) == 1", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def is_static(self):\n return self._is_static", "def pollable(self):\n return bool(self.ScopeCollector)", "def names_singleton(self):\r\n if self.stream:\r\n return True\r\n else:\r\n return os.path.isfile(self.object_name)", "def has(self) -> bool:\n\n return self.scopefunc() in self.registry", "def active(cls):\r\n return cls._GLOBAL", "def is_global(self) -> bool:\n return self.parent is None", "def __bool__(self) -> bool:\n return self._rpc is not None", "def is_open(self):\n return self._session is not None", "def public_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"public_endpoint\")", "def is_open(self):\n\t\treturn self._session is not None", "def active(self):\n return self in manager.handler", "def in_global_code(self):\n return self.sscope is None and self.lscope is None", "def is_authenticated(self):\n return self.ping() is not None", "def is_request_in_microsite():\r\n return get_configuration()", "def active(self):\n return self.server.is_active() or self.executing", "def is_global(self) -> bool:\n return self._parent_node.is_global()", "def is_served(self):\n return self._is_served", "def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)", "def alive(self):\n return self._thread is not None", "def valid_in_request(self):\n return self._repeatable[0] is not None", "def is_open(self):\n return self._socket is not None", "def is_global(self, key):\n return key in dir(django_global_settings)" ]
[ "0.7000542", "0.64485866", "0.6180424", "0.61669946", "0.6114246", "0.60283005", "0.60228246", "0.5933381", "0.5918585", "0.5906537", "0.5830943", "0.58295834", "0.58278096", "0.5807388", "0.57512015", "0.5715974", "0.5711425", "0.56972426", "0.5690803", "0.5667977", "0.5655342", "0.56474197", "0.5637246", "0.5598425", "0.55947024", "0.5574134", "0.5563394", "0.5554443", "0.55245554", "0.5518384" ]
0.77170163
0
Return the hash for the event using ID only
def hash_by_id(cls, event_id): return '{}::{}'.format(cls.Endpoint.key, event_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def existing_hash(self, id):\r\n return self._read_sha_by_id(id)", "def _calculate_hash(self, entry):\n entry.pop('id', None)\n return hashlib.sha224(json.dumps(\n entry, cls=DjangoJSONEncoder).encode('utf-8')).hexdigest()", "def id_to_hash(self, id):\n mm = hashlib.sha256(struct.pack('>q', id))\n vv = struct.unpack(\">q\", mm.digest()[0:8])\n return vv[0] & 0x7fffffffffffffff # Don't be negative", "def hash(self):\n return self.hash_by_id(self.id)", "def generate_hash(group_id, event_id, base_seed=0):\n hash = (base_seed + (group_id * GROUP_ID_HASH_CODE) % HASH_MOD_CODE +\n (event_id * EVENT_ID_HASH_CODE) % HASH_MOD_CODE) % HASH_MOD_CODE\n\n return hash", "def id(self) -> str:\n return self._event.get('id')", "def __hash__(self):\n return self['id'].__hash__()", "def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')", "def generate_hash_hazard(hazard_group_id, event_id, base_seed=0):\n hash = (base_seed + (hazard_group_id * HAZARD_GROUP_ID_HASH_CODE) % HAZARD_HASH_MOD_CODE +\n (event_id * HAZARD_EVENT_ID_HASH_CODE) % HAZARD_HASH_MOD_CODE) % HAZARD_HASH_MOD_CODE\n\n return hash", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def id(self):\n return sha256(self.serialize()).digest()", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def get_hash(self):\r\n return", "def get_key(self, obj):\n if hasattr(obj, \"id\"):\n hashed_id = hashlib.md5(str(obj.id).encode(\"utf-8\")).hexdigest()\n return hashed_id\n else:\n return None", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "def get_hash(self):\n return self.__hash", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "async def get_hash(identifier):\n return hashlib.md5(identifier.encode('utf8')).hexdigest()", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def __hash__(self):\n return self._id", "def current_hash(self):", "def hash_block(self):\n sha = hasher.sha256()\n sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).endswith('utf-8'))\n return sha.hexdigest()", "def hash(self) -> bytes:", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def event_id(self):\n return self._event_id", "def hash_key(self):" ]
[ "0.7319287", "0.707525", "0.70566404", "0.6818503", "0.67281663", "0.6721193", "0.6599954", "0.65424097", "0.64546216", "0.64048505", "0.6393958", "0.6373491", "0.63731545", "0.63528925", "0.6342936", "0.6342936", "0.6342936", "0.6342936", "0.6318093", "0.63149315", "0.6305195", "0.62620616", "0.62551427", "0.6231054", "0.6213415", "0.62031066", "0.61806977", "0.6176034", "0.6169167", "0.61356866" ]
0.82204044
0
Build new_events This factory method is actually the events factory.
def build_events(self) -> list: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_event(self):\n pass", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def create_event(self, **kwargs):\n events = self.variables['events']\n events.append(kwargs)\n self.variables['events'] = events", "def event_factory_fixture():\n def _factory(device_id, event_type=\"DEVICE_EVENT\", capability='',\n attribute='Updated', value='Value', data=None):\n event = Mock()\n event.event_type = event_type\n event.device_id = device_id\n event.component_id = 'main'\n event.capability = capability\n event.attribute = attribute\n event.value = value\n event.data = data\n event.location_id = str(uuid4())\n return event\n return _factory", "def buildEvent(data):", "def __init__(self, events):\n self.events = events", "def CreateNewEvent(arguments: List[Tuple[str, type]] = [], event_name: str = '') -> Event:\n pass", "def _event_builder(self, events, event_codes):\n for ev in events:\n event_out = lems.EventOut(ev) # output (e.g. spike)\n oc = lems.OnCondition(renderer.render_expr(events[ev]))\n oc.add_action(event_out)\n # if event is not in model ports we should add it\n if ev not in self._component_type.event_ports:\n self._component_type.add(lems.EventPort(name=ev, direction='out'))\n if ev in event_codes:\n for ec in re.split(';|\\n', event_codes[ev]):\n event_eq = _equation_separator(ec)\n oc.add_action(lems.StateAssignment(event_eq[0], event_eq[1]))\n spike_flag = False\n if ev == SPIKE:\n spike_flag = True\n yield (spike_flag, oc)", "def createEvents(self):\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # create the service variable\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n # adds self.mainEvent to calendar\n event = service.events().insert(calendarId='primary', body=self.mainEvent).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n\n # adds all values in self.importantDates to calendar\n for i in self.importantDates:\n event = service.events().insert(calendarId='primary', body=i).execute()\n print('Event created: %s' % (event.get('htmlLink')))", "def create_event(klass, form, creator):\n\n if form.is_recurring.data:\n # Series\n return klass.create_series(form, creator)\n # Single event\n return klass.create_single_event(form, creator)", "async def createEvent(self, event: Event) -> None:", "def events(self):\r\n return Events(self)", "def events(self):\r\n return Events(self)", "def events(self):\r\n return Events(self)", "def _create_event(\n project,\n creator_id,\n datetime_start,\n datetime_end,\n description=\"Test Event\",\n 
location=\"test_location\",\n is_public=False,\n event_type=\"MN\",\n coordinator=None\n):\n event = Event(\n project=project,\n description=description,\n location=location,\n is_public=is_public,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n coordinator=coordinator,\n creator_id=creator_id\n )\n event.save()\n return event", "def create(self, validated_data):\n return Event.objects.create(**validated_data)", "def load_new_events_list(self):\n self._event_index_list = self.gdc.new_events_indices\n self.populate_event_list_from_index_list()", "def _get_events(self):\n self.cache = []\n\n # Test if we have event table\n with datascope.closing(datascope.dbopen(self.db, 'r')) as db:\n dbtable = db.lookup(table='event')\n if dbtable.query(datascope.dbTABLE_PRESENT):\n steps = ['dbopen event']\n steps.extend(['dbjoin origin'])\n steps.extend(['dbsubset origin.orid != NULL'])\n steps.extend(['dbsubset origin.orid == prefor'])\n fields = ['evid']\n else:\n steps = ['dbopen origin']\n steps.extend(['dbsubset orid != NULL'])\n fields = []\n\n fields.extend(['orid','time','lat','lon','depth','auth','nass',\n 'ndef','review'])\n\n for v in extract_from_db(self.db, steps, fields, self.db_subset):\n if not 'evid' in v:\n v['evid'] = v['orid']\n\n self.logging.debug( \"Events(): new event #%s\" % v['evid'] )\n\n v['allmags'] = []\n v['magnitude'] = '-'\n v['maglddate'] = 0\n v['srname'] = '-'\n v['grname'] = '-'\n v['time'] = parse_sta_time(v['time'])\n v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone)\n\n try:\n v['srname'] = stock.srname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with srname for orid %s: %s' % (v['orid'],\n v['lat'],v['lon'],e) )\n\n try:\n v['grname'] = stock.grname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with grname for orid %s: %s' % (v['orid'],\n v['lat'], v['lon'],e) )\n\n orid = v['orid']\n if orid in self.mags:\n for o in self.mags[orid]:\n v['allmags'].append(self.mags[orid][o])\n if self.mags[orid][o]['lddate'] > v['maglddate']:\n v['magnitude'] = self.mags[orid][o]['strmag']\n v['maglddate'] = self.mags[orid][o]['lddate']\n\n\n self.cache.append( v )", "def create_event() -> abc.Event:\n return get_asynclib().Event()", "def fusion_api_create_events(self, body, api=None, headers=None):\n return self.event.create(body, api, headers)", "def __init__(self, events):\n for event in events:\n #do stuff\n pass", "def event_request_factory_fixture(event_factory):\n def _factory(device_ids=None, events=None):\n request = Mock()\n request.installed_app_id = uuid4()\n if events is None:\n events = []\n if device_ids:\n events.extend([event_factory(id) for id in device_ids])\n events.append(event_factory(uuid4()))\n events.append(event_factory(device_ids[0], event_type=\"OTHER\"))\n request.events = events\n return request\n return _factory", "def collect_new_events(self) -> list:\n self.logger.debug('Collecting new events...')\n events = self.build_events()\n if not events:\n self.logger.debug('No new events.')\n for event in events:\n self.logger.info('A new event has been detected: {}'.format(event))\n self._buffer_buisy_mutex.acquire()\n self._events_buffer.append(event)\n self._buffer_buisy_mutex.release()", "def create_default_events(self):\n self.events.register_class(\"commands\", LineEvent)\n self.events.register_class(\"commands_out\", LineEvent)\n self.events.register_class(\"hooks\", HookEvent)", "def test_create_event(self):\n event_type = 'SERVICE NOTIFICATION'\n fields = 
EVENT_FIELDS.get(event_type, None)\n parts = [\n 'nagiosadmin',\n 'nagios4',\n 'Root Partition',\n 'CRITICAL',\n 'notify-service-by-email',\n 'DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]\n event = create_event(\n timestamp=1603813628, event_type=event_type, hostname='docker-desktop', fields=fields._make(parts)\n )\n\n assert event['timestamp'] == 1603813628\n assert event['event_type'] == 'SERVICE NOTIFICATION'\n assert event[\"msg_title\"] == 'Root Partition'\n assert event[\"source_type_name\"] == 'SERVICE NOTIFICATION'\n assert event[\"msg_text\"] == 'CRITICAL'\n assert event['tags'] == [\n 'contact:nagiosadmin',\n 'host:nagios4',\n 'check_name:Root Partition',\n 'event_state:CRITICAL',\n 'notification_type:notify-service-by-email',\n 'payload:DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]", "def new_event(self, subject=None):\n return self.event_constructor(parent=self, subject=subject)", "def create_event(self):\n self.driver.get(f'{self.base_url}/event')\n\n enter_event_name = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'eventName')))\n enter_event_name.send_keys(self.random_string)\n\n # self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div[2]/div/div/div[1]/div/div[1]/div[1]/label[2]/span[1]').click()", "def new_event(self, key, **kwargs):\n return Event(project=self._project_name,\n plugin=self.name, key=key, **kwargs)", "def create_events():\n events = {}\n events[\"Workers_can_proceed\"] = mp.Event()\n for i in range(NUM_WORKERS):\n events[i] = mp.Event()\n return events", "def host_create_event():\n data = request.get_json(force=True)\n if not data:\n return jsonify(**{'succeed': False, 'data': []})\n new_event = Event()\n new_event.data = data\n new_event.data['registrants'] = {email: False for email in new_event.data['registrants']}\n all_event[data['event_name']] = new_event\n # create folder for this event\n folder_name = data['event_name'].replace(' ', '')\n absolute_folder_name = os.path.dirname(os.path.abspath(__file__)) + '/static/files/' + folder_name\n print absolute_folder_name\n if not os.path.exists(absolute_folder_name):\n os.makedirs(absolute_folder_name)\n for key,val in data['files'].iteritems():\n file_address = absolute_folder_name + '/' + key\n with open(file_address, \"w\") as text_file:\n text_file.write(val)\n data['files'][key] = '/static/files/' + folder_name + '/' + key\n return jsonify(**{'succeed': True, 'data': new_event.data})" ]
[ "0.7068018", "0.6923349", "0.6733583", "0.6481142", "0.6407484", "0.63098305", "0.6274381", "0.62523794", "0.61910576", "0.61229914", "0.601929", "0.6007083", "0.6007083", "0.6007083", "0.60045576", "0.597696", "0.595828", "0.593561", "0.5929939", "0.5923887", "0.5875728", "0.5875431", "0.58721465", "0.5867852", "0.5847854", "0.5839234", "0.58380187", "0.58001864", "0.57983536", "0.5763822" ]
0.6995154
1
Collecting new events and push them into the events buffer
def collect_new_events(self) -> list: self.logger.debug('Collecting new events...') events = self.build_events() if not events: self.logger.debug('No new events.') for event in events: self.logger.info('A new event has been detected: {}'.format(event)) self._buffer_buisy_mutex.acquire() self._events_buffer.append(event) self._buffer_buisy_mutex.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_events(self):\n while 1:\n try:\n self.events_local.append(self._q.get(False))\n except queue.Empty:\n break", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def _store_events(self, c, e):\n self._events.append(e)", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)", "def get_events_batch(self) -> PayloadDictList:\n batch = self.event_buffer\n self.event_buffer = []\n return batch", "def update_events(self, new_events):\n\n for new_event in new_events:\n self.__events.setdefault(new_event.type, []).append(new_event)", "def collect_events(self, inputs, ew):\n input_module.collect_events(self, inputs, ew)", "async def events(self) -> Iterable[Event]:", "def push(event):\n _pushedEvents.append(event)", "def test_streamBufferedEvents(self):\n events = (\n dict(eventID=u\"1\", eventText=u\"A\"),\n dict(eventID=u\"2\", eventText=u\"B\"),\n dict(eventID=u\"3\", eventText=u\"C\"),\n dict(eventID=u\"4\", eventText=u\"D\"),\n )\n\n resource = self.eventSourceResource()\n resource.addEvents(events)\n\n response = self.render(resource)\n\n # Each result from read() is another event\n for i in range(len(events)):\n result = yield response.stream.read()\n self.assertEquals(\n result,\n textAsEvent(\n text=events[i][\"eventText\"],\n eventID=events[i][\"eventID\"]\n )\n )", "def _gather_events(self, newframe_event):\n if not self.closed:\n for pg_event in pg.event.get():\n event = self._pygame_event_to_event(pg_event)\n if event is not None:\n self.event_hub.raise_event(event)\n self._add_animation_events()", "def processEvents(self):\n self.framelist = sorted(self.framelist, key=lambda event: event.timestamp, reverse=True)\n self.framequeue = sorted(self.framequeue, key=lambda event: event.timestamp, reverse=True)\n self.packetqueue = sorted(self.packetqueue, key=lambda event: event.timestamp, reverse=True)\n \n print len(self.framequeue)\n print len(self.packetqueue)\n \n while len(self.framequeue) > 0 or len(self.packetqueue) > 0:\n self.getNextEvent().processEvent(self, self.decisionAlg)", "def push_event(self, evt):\n self.event_list.append(evt)", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def write_event(self, event):\n self.events_written.append(event)", "def logevents(self, events, request = None):\n for event in events:\n self.logevent(event, request)", "def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break", "def addtoevents(self, event):\n self._events.append(event)", "def events(self, events):\n\n self._events = events", "def update(self, events):\n events = events", "def process_events(self):\n gameevents = copy.copy(self.gameevents)\n del self.gameevents[:]\n while len(gameevents) > 0:\n currentevent = gameevents.pop(0)\n ticks = currentevent.ticks\n time = currentevent.time\n eid = currentevent.eid\n game = currentevent.game\n command = currentevent.command\n obj = currentevent.obj\n target = currentevent.target\n type = currentevent.type\n if 
self.config['Logging']['logging'] and currentevent.log:\n self.log.write(\"%s\\t%f\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\" % (type, time, ticks, game, eid, command, obj, target))\n if command == \"press\":\n if obj == \"pause\":\n self.gametimer.pause()\n self.state = self.STATE_PAUSED\n elif obj == \"unpause\":\n self.state = self.STATE_PLAY\n self.gametimer.unpause()\n elif obj == \"quit\":\n self.lc.stop()\n elif obj == \"left\":\n self.ship.turn_left_flag = True\n elif obj == \"right\":\n self.ship.turn_right_flag = True\n elif obj == \"thrust\":\n self.ship.thrust_flag = True\n elif obj == \"fire\":\n self.ship.fire()\n elif obj == \"iff\":\n #print len(self.mine_list)\n #don't do anything if there's no mine on the screen\n if len(self.mine_list) == 0:\n pass\n elif self.mine_list[0].tagged == \"fail\":\n self.gameevents.add(\"tag\", \"already_failed\")\n elif self.mine_list[0].tagged == \"disable\":\n self.gameevents.add(\"tag\", \"already_disabled\")\n elif self.mine_list[0].tagged == \"tagged\":\n self.gameevents.add(\"tag\", \"already_tagged\")\n #if the mine is untagged and this is the first tap\n elif self.mine_list[0].tagged == \"untagged\" and self.mine_list.iff_flag == False:\n if self.score.iff in self.mine_list.foe_letters:\n self.gameevents.add(\"first_tag\", \"foe\")\n else:\n self.gameevents.add(\"first_tag\", \"friend_fail\")\n #if the mine is a foe, untagged, and this is the second tap, check timer, set intrvl\n elif self.mine_list[0].tagged == \"untagged\" and self.mine_list.iff_flag:\n self.score.intrvl = self.mine_list.iff_timer.elapsed()\n if (self.mine_list.iff_timer.elapsed() > self.config['Mine']['intrvl_min']) and (self.mine_list.iff_timer.elapsed() < self.config['Mine']['intrvl_max']):\n self.gameevents.add(\"second_tag\", \"foe\")\n else:\n self.gameevents.add(\"second_tag\", \"out_of_bounds\")\n elif obj == \"shots\":\n if not self.bonus_captured:\n self.bonus_captured = True\n if self.config['General']['bonus_system'] == \"standard\":\n #if current symbol is bonus but previous wasn't, set flag to deny bonus if next symbol happens to be the bonus symbol\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol != self.bonus.bonus_symbol):\n self.bonus.flag = True\n self.gameevents.add(\"flagged_for_first_bonus\")\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol == self.bonus.bonus_symbol):\n #bonus available, check flag to award or deny bonus\n if self.bonus.flag:\n self.gameevents.add(\"attempt_to_capture_flagged_bonus\")\n else:\n self.capturedBonuses += 1\n self.gameevents.add(\"shots_bonus_capture\")\n self.gameevents.add(\"score+\", \"shots\", self.config['Score']['bonus_missiles'])\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n self.bonus.flag = True\n else: #AX-CPT\n if self.bonus.axcpt_flag == True and (self.bonus.state == \"iti\" or self.bonus.state == \"target\") and self.bonus.current_pair == \"ax\":\n self.snd_bonus_success.play()\n self.capturedBonuses += 1\n self.gameevents.add(\"shots_bonus_capture\")\n self.gameevents.add(\"score+\", \"shots\", self.config['Score']['bonus_missiles'])\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif self.bonus.axcpt_flag:\n self.bonus.axcpt_flag = False\n self.snd_bonus_fail.play()\n 
self.gameevents.add(\"shots_bonus_failure\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score-\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif obj == \"pnts\":\n if not self.bonus_captured:\n self.bonus_captured = True\n if self.config['General']['bonus_system'] == \"standard\":\n #if current symbol is bonus but previous wasn't, set flag to deny bonus if next symbol happens to be the bonus symbol\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol != self.bonus.bonus_symbol):\n self.bonus.flag = True\n self.gameevents.add(\"flagged_for_first_bonus\")\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol == self.bonus.bonus_symbol):\n #bonus available, check flag to award or deny bonus\n if self.bonus.flag:\n self.gameevents.add(\"attempt_to_capture_flagged_bonus\")\n else:\n self.capturedBonuses += 1\n self.gameevents.add(\"pnts_pnts_capture\")\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n self.bonus.flag = True\n else: #AX-CPT\n if self.bonus.axcpt_flag == True and (self.bonus.state == \"iti\" or self.bonus.state == \"target\") and self.bonus.current_pair == \"ax\":\n self.snd_bonus_success.play()\n self.capturedBonuses += 1\n self.gameevents.add(\"pnts_bonus_capture\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n else:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'])\n elif self.bonus.axcpt_flag:\n self.bonus.axcpt_flag = False\n self.snd_bonus_fail.play()\n self.gameevents.add(\"pnts_bonus_failure\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score-\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif command == \"destroyed\":\n if obj == \"ship\":\n self.deaths += 1\n self.reset_position()\n self.reset_mines()\n elif command == \"bonus_available\":\n self.totalBonuses += 1\n elif command == \"first_tag\":\n if obj == \"foe\":\n self.mine_list.iff_flag = True\n self.mine_list.iff_timer.reset()\n elif len(self.mine_list) > 0:\n self.mine_list[0].tagged = \"fail\"\n elif command == \"second_tag\":\n self.mine_list.iff_flag = False\n if obj == \"foe\" and len(self.mine_list) > 0:\n self.mine_list[0].tagged = \"tagged\"\n elif command == \"release\":\n if obj == \"left\":\n self.ship.turn_left_flag = False\n elif obj == \"right\":\n self.ship.turn_right_flag = False\n elif obj == \"thrust\":\n self.ship.thrust_flag = False\n elif command == \"warp\":\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['warp_penalty'])\n self.gameevents.add(\"score-\", \"flight\", self.config['Score']['warp_penalty'])\n elif command == \"activate\":\n if obj == \"bonus\":\n self.bonus.visible = True\n self.bonus.timer.reset()\n self.bonus.get_new_symbol()\n self.gameevents.add(\"new_bonus\", self.bonus.current_symbol, self.bonus.prior_symbol)\n if self.bonus.current_symbol == self.bonus.prior_symbol == self.bonus.bonus_symbol:\n self.gameevents.add(\"bonus_available\")\n #\"reset\" the bonus flag (which prevents premature capture) if symbol is not bonus\n if 
self.bonus.current_symbol != self.bonus.bonus_symbol:\n self.bonus.flag = False\n elif command == \"deactivate\":\n if obj == \"bonus\":\n self.bonus.visible = False\n self.bonus.timer.reset()\n elif command == \"spawn\":\n self.totalMines += 1\n self.mine_list.flag = True\n self.mine_list.timer.reset()\n self.mine_list.add()\n if self.mine_list[0].iff in self.mine_list.foe_letters:\n self.gameevents.add(\"new_mine\", \"foe\")\n else:\n self.gameevents.add(\"new_mine\", \"friend\")\n elif command == \"timeout\":\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.mine_list.timer.reset()\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['mine_timeout_penalty'])\n elif command == \"score++\":\n if obj == \"bonus_points\":\n self.gameevents.add(\"score+\", \"pnts\", int(target))\n elif command == \"score+\":\n self.score.__setattr__(obj, self.score.__getattribute__(obj) + float(target))\n if self.score.shots > self.config['Missile']['missile_max']:\n self.score.shots = self.config['Missile']['missile_max']\n elif command == \"score-\":\n self.score.__setattr__(obj, self.score.__getattribute__(obj) - float(target))\n elif command == \"collide\":\n self.process_collision(obj, target)\n elif command == \"joyaxismotion\":\n if obj == 0:\n self.ship.joy_turn = target\n elif obj == 1:\n self.ship.joy_thrust = target", "def updateEvents(self):\n # Update calendar data\n d_start = datetime.datetime.today()\n d_end = d_start + datetime.timedelta(self.delta_days)\n results = self.cal_cal.date_search(d_start, d_end)\n\n # Flush the events dict\n self.events = []\n # Add each events\n for event in results:\n # Format the title of the event\n str_title = event.instance.vevent.summary.value\n if len(str_title) > 20:\n str_title = str_title[:17] + \"...\"\n # Format the date of the event\n vdate = event.instance.vevent.dtstart.value\n d = datetime.datetime.strptime(\n vdate.strftime(\"%d %m %Y\"), \"%d %m %Y\")\n str_date = \"%s %d %s\" % (\n self.days_french[d.weekday()],\n d.day,\n self.months_french[d.month -1])\n # Format the date gap\n gap = 1 + (d - d_start).days\n # Save the event\n self.events.append((str_title, str_date, gap))", "def build_events(self) -> list:\n raise NotImplementedError()", "def test_streamNewEvents(self):\n events = (\n dict(eventID=u\"1\", eventText=u\"A\"),\n dict(eventID=u\"2\", eventText=u\"B\"),\n dict(eventID=u\"3\", eventText=u\"C\"),\n dict(eventID=u\"4\", eventText=u\"D\"),\n )\n\n resource = self.eventSourceResource()\n\n response = self.render(resource)\n\n # The first read should block on new events.\n d = response.stream.read()\n self.assertFalse(d.called)\n\n # Add some events\n resource.addEvents(events)\n\n # We should now be unblocked\n self.assertTrue(d.called)\n\n # Each result from read() is another event\n for i in range(len(events)):\n if d is None:\n result = yield response.stream.read()\n else:\n result = yield d\n d = None\n\n self.assertEquals(\n result,\n textAsEvent(\n text=events[i][\"eventText\"],\n eventID=(events[i][\"eventID\"])\n )\n )\n\n # The next read should block on new events.\n d = response.stream.read()\n self.assertFalse(d.called)\n\n d.addErrback(lambda f: None)\n d.cancel()", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def _run_events(self):\n 
new_futures = set()\n while len(self.events) > 0:\n LOG.debug('processing events (%s remaining)', len(self.events))\n # Get next event\n event = self.events.popleft()\n LOG.debug('processing event: %s', event)\n # Handle the event\n for handler in self.get_handlers(event):\n # Attempt to run the handler, but don't break everything if the handler fails\n LOG.debug('running handler: %r', handler)\n future = self._run_handler(handler, event)\n if future:\n new_futures.add(future)\n self.new_events.clear()\n if len(new_futures) > 0:\n LOG.debug('got %s new futures', len(new_futures))\n return new_futures", "def events(self, events: object):\n\n self._events = events", "def __init__(self, events):\n for event in events:\n #do stuff\n pass", "def _get_events(self):\n self.cache = []\n\n # Test if we have event table\n with datascope.closing(datascope.dbopen(self.db, 'r')) as db:\n dbtable = db.lookup(table='event')\n if dbtable.query(datascope.dbTABLE_PRESENT):\n steps = ['dbopen event']\n steps.extend(['dbjoin origin'])\n steps.extend(['dbsubset origin.orid != NULL'])\n steps.extend(['dbsubset origin.orid == prefor'])\n fields = ['evid']\n else:\n steps = ['dbopen origin']\n steps.extend(['dbsubset orid != NULL'])\n fields = []\n\n fields.extend(['orid','time','lat','lon','depth','auth','nass',\n 'ndef','review'])\n\n for v in extract_from_db(self.db, steps, fields, self.db_subset):\n if not 'evid' in v:\n v['evid'] = v['orid']\n\n self.logging.debug( \"Events(): new event #%s\" % v['evid'] )\n\n v['allmags'] = []\n v['magnitude'] = '-'\n v['maglddate'] = 0\n v['srname'] = '-'\n v['grname'] = '-'\n v['time'] = parse_sta_time(v['time'])\n v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone)\n\n try:\n v['srname'] = stock.srname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with srname for orid %s: %s' % (v['orid'],\n v['lat'],v['lon'],e) )\n\n try:\n v['grname'] = stock.grname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with grname for orid %s: %s' % (v['orid'],\n v['lat'], v['lon'],e) )\n\n orid = v['orid']\n if orid in self.mags:\n for o in self.mags[orid]:\n v['allmags'].append(self.mags[orid][o])\n if self.mags[orid][o]['lddate'] > v['maglddate']:\n v['magnitude'] = self.mags[orid][o]['strmag']\n v['maglddate'] = self.mags[orid][o]['lddate']\n\n\n self.cache.append( v )" ]
[ "0.7208618", "0.69300455", "0.67458445", "0.6702277", "0.64130634", "0.6407206", "0.6382773", "0.63692826", "0.6364064", "0.6345537", "0.6214517", "0.6210405", "0.61788917", "0.61758673", "0.6163426", "0.6151569", "0.6115069", "0.6052047", "0.6035892", "0.6028083", "0.6023511", "0.60190445", "0.6014081", "0.6009814", "0.5984677", "0.5942899", "0.59298426", "0.5914273", "0.5894038", "0.5879677" ]
0.81267637
0
Return the conditional tasks of this bot.
def get_conditional_tasks(self, scope: EndpointScope=None): from nudgebot.tasks import ConditionalTask conditional_tasks = [task for task in self._tasks if issubclass(task, ConditionalTask)] if scope: static_hierarchy = [ps.__class__ for ps in scope.hierarchy] conditional_tasks = [task for task in conditional_tasks if task.EndpointScope in static_hierarchy] return conditional_tasks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_created_tasks(self):\n tasks = []\n for task_config in get_task_configs(self.context):\n task = task_config.get_created_task(self.context)\n if not task:\n continue\n matched, not_matched = task.start_conditions_status()\n if not not_matched:\n continue\n tasks.append((task, not_matched))\n\n return tasks", "def get_ready_tasks(self) -> Sequence[str]:\n raise NotImplementedError", "def get_tasks(self):\n return self.tasks", "def tasks_for_user(kls, user):\n tasks = SpecialQuestTask.objects.all()\n tasks_done = [t for t in tasks if t in user.done_tasks.all()]\n tasks_not_done = [t for t in tasks if t not in user.done_tasks.all()]\n tasks_not_done = [t for t in tasks_not_done if t.is_active()]\n return tasks_done, tasks_not_done", "def normalTasks(self):\n return self._tasks", "def custom_tasks(self) -> Dict[str, Task]:\n return {k: v for k, v in self.tasks.items() if not k.startswith(\"celery.\")}", "def get_tasks(self):\n return self.stn.get_tasks()", "def compare_tasks(self):\n return self._compare_tasks", "def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]", "def get_tickable_tasks(self):\n return (self.get_running_tasks() +\n self.get_nearest_tickable_pending_tasks())", "def get_tasks(self):\n return self.tasks.all()", "def available_tasks(cls):\n return cls._available_tasks", "def get_active_tasks(self):\n qry = Task.query.filter_by(user=self.id)\n qry = qry.filter_by(completed_on=None)\n return qry.all()", "def get_ready_tasks(self) -> Sequence[str]:\n done_tasks = {\n node\n for node in self.task_graph.nodes\n if self.get_info(node).status.value > TaskStatus.SKIPPED.value\n }\n not_started_tasks = {\n node\n for node in self.task_graph.nodes\n if self.get_info(node).status == TaskStatus.NOT_STARTED\n }\n out = []\n for task_key in not_started_tasks:\n # We will mark tasks as ready if all their predecessors are done;\n # it's left to the executor to skip other tasks as needed\n if set(self.task_graph.pred[task_key]) - done_tasks:\n continue\n out.append(task_key)\n return out", "def get_ready_tasks(self):\n return self._gdb_interface.get_ready_tasks()", "def tasks(self):\n args = Namespace(rev=self.rev)\n data = run_query('push_results', args)['data']\n\n tasks = []\n for kwargs in data:\n # Do a bit of data sanitization.\n if any(a not in kwargs for a in ('label', 'duration', 'result', 'classification')):\n continue\n\n if kwargs['duration'] <= 0:\n continue\n\n tasks.append(Task(**kwargs))\n\n return tasks", "def tasks(self) -> List[TaskStatusDefinition]:\n return self._tasks", "def get_all_tasks(self):\n return [\n self.create_virtual_environment,\n self.doc,\n self.install,\n self.lint,\n self.make_distribution,\n self.reset,\n self.setup,\n self.test,\n ]", "def get(self):\n response = {\"tasks\": []}\n user_roles = [role[\"name\"] for role in g._user[\"roles\"]]\n if \"administrator\" in user_roles or \"tasks_all\" in user_roles:\n for k, v in available_tasks_by_path.items():\n response[\"tasks\"].append(v)\n else:\n for k, v in available_tasks_by_path.items():\n if f\"task_{k}\" in user_roles:\n response[\"tasks\"].append(v)\n return response", "def get_running_condor_jobs(self):\n return Utils.condor_q(selection_pairs=[[\"taskname\",self.unique_name]], extra_columns=[\"jobnum\"])", "def get_all_tasks(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT task FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn [tup[0] for tup in tup_list]", "def 
get_tasks(self):\n return [getattr(self, k).value() for k in self._node_dict.values()]", "def task_get(self):\n for task in self.task_manager.task():\n if task.status in (TASK.UNSCHEDULED, TASK.DONE):\n yield task", "def get_ready_tasks(self):\n task_records = self._read_transaction(tx.get_ready_tasks)\n tuples = self._get_task_data_tuples(task_records)\n return [_reconstruct_task(tup[0], tup[1], tup[2], tup[3], tup[4]) for tup in tuples]", "def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()", "def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list", "def get_pending(tasks):\n results = [task for task in tasks if task != main_poller_task and task.done() == False]\n return results", "def get_workflow(cls):\n return (\n cls.initial_processing +\n cls.match_processing +\n cls.before_halt_check + [\n IF_ELSE(cls.halt_check, cls.on_halt, cls.on_no_halt)\n ] + cls.before_upload_check + [\n IF_ELSE(cls.upload_check, cls.on_upload, cls.on_no_upload)\n ] + cls.final_processing\n )", "def get_tasks(self):\n return self.task_collection", "def get_started_tasks(self):\n tasks = []\n for task_config in get_task_configs(self.context):\n task = task_config.get_started_task(self.context)\n if not task:\n continue\n matched, not_matched = task.end_conditions_status()\n if not not_matched:\n continue\n\n subtasks_open = False\n for sub_task in task.get_subtasks():\n if sub_task.get_status() == STARTED:\n subtasks_open = True\n break\n if subtasks_open:\n continue\n\n tasks.append((task, not_matched))\n\n return tasks" ]
[ "0.651819", "0.6293233", "0.62041074", "0.61520755", "0.6030803", "0.60058683", "0.59421945", "0.59181774", "0.58944106", "0.5864872", "0.5841039", "0.57834464", "0.57781357", "0.57735986", "0.5766781", "0.5705261", "0.5684001", "0.5660684", "0.5619601", "0.5606973", "0.56057334", "0.55880123", "0.5543353", "0.5527569", "0.5510547", "0.54779524", "0.5472705", "0.5466368", "0.54419506", "0.5421373" ]
0.7749001
0
If the bot is pollable, performing a poll, collecting all the scopes from the scopes collectors, updating statistics and handling tasks.
def poll(self): if not self.pollable: self.logger.warning('Poll has been triggered but the bot is not pollable! Return;') return self._busy_mutext.acquire() try: self.logger.info('Stating poll') for scope in self.ScopeCollector.collect_all(): stats_collection = [] for stat_class in self._statistics: for parent in scope.hierarchy: if stat_class.EndpointScope == parent.__class__: statistics = stat_class(**parent.query) # TODO: Init from scope statistics.set_endpoint_scope(parent) self.logger.debug(f'Collecting statistics: {statistics}') statistics.collect() stats_collection.append(statistics) for task_cls in self.get_conditional_tasks(scope): task = task_cls(scope, stats_collection) task.handle() self.logger.info('Finished poll') finally: self._busy_mutext.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def poll(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n poll_id = create_strawpoll(\"What to play?\", suggestions)\n\n if poll_id:\n await self.bot.say(\"Here's your strawpoll link: https://www.strawpoll.me/{}\".format(poll_id))\n else:\n await self.bot.say(\"Phew! You have way too many games to create a poll. You should try `{}game suggest` instead.\".format(ctx.prefix))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def poll(self):\n self.get_peers()\n self.get_trackers()\n self.get_files()", "def poll(self):\n self.poll_function(self.connection)", "def polling_call(self) -> global___Snippet.ClientCall:", "def _start_polling(self):\n self._handle = asyncio.get_event_loop().create_task(self._poll())", "def poll(self, poll_input):", "def poll(self):\n raise NotImplementedError()", "def poll(self) -> None:\n self._resolve_rdates()\n self._resolve_queries()\n self._process_special_cells()\n self._fetch_queries()", "async def poll_refresh(self) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.poll_refresh))", "def privileged_polling(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"privileged_polling\"), kwargs)", "def pollable(self):\n return bool(self.ScopeCollector)", "def poll(update: Update, context: CallbackContext) -> None:\n questions = [\"Good\", \"Really good\", \"Fantastic\", \"Great\"]\n message = context.bot.send_poll(\n update.effective_chat.id,\n \"How are you?\",\n questions,\n is_anonymous=False,\n allows_multiple_answers=True,\n )\n # Save some info about the poll the bot_data for later use in receive_poll_answer\n payload = {\n message.poll.id: {\n \"questions\": questions,\n \"message_id\": message.message_id,\n \"chat_id\": update.effective_chat.id,\n \"answers\": 0,\n }\n }\n context.bot_data.update(payload)", "async def run_service(loop):\n curr = time.time()\n results = []\n\n while True:\n # First get the list of pending tasks, if there exists any\n results = get_pending(results)\n\n # Now poll the endpoints\n for url in API_URLS:\n future = loop.create_task(poll_endpoint(url))\n results.append(future)\n \n await asyncio.gather(*results)\n \n delta = time.time() - curr\n diff = max(0, POLL_INTERVAL - delta)\n await asyncio.sleep(diff)\n curr = time.time()", "async def async_poll(self):\r\n socket = WebSocketClient(7001)\r\n await socket.connect()\r\n\r\n while not self.polling_thread.stopped():\r\n await asyncio.sleep(1)\r\n if not self.should_poll:\r\n continue\r\n\r\n song_request = self.get_currently_playing()\r\n if song_request.status_code != requests.codes.ok:\r\n continue\r\n\r\n data = song_request.json()\r\n if not self.data_is_valid(data):\r\n logger.info(f\"SPOTIFY ERROR: {data}\")\r\n continue\r\n\r\n await socket.send({\r\n \"album_image_url\": data[\"item\"][\"album\"][\"images\"][1][\"url\"],\r\n \"artist\": \", \".join([artist[\"name\"] for artist in data[\"item\"][\"artists\"]]),\r\n \"album\": data[\"item\"][\"album\"][\"name\"],\r\n \"song\": data[\"item\"][\"name\"],\r\n \"progress_ms\": data[\"progress_ms\"],\r\n \"duration_ms\": data[\"item\"][\"duration_ms\"]\r\n })", "def on_connect(self):\n if current_user.is_authenticated:\n polls = Poll.query \\\n 
.filter(User.rooms.any(User.id == current_user.id)) \\\n .filter(or_(Poll.visible.is_(True), Room.owner_id == current_user.id)).all()\n else:\n if session.get(\"rooms\") is not None:\n polls = Poll.query \\\n .filter(Room.id.in_(session.get(\"rooms\"))) \\\n .filter(Poll.visible.is_(True)).all()\n else:\n polls = []\n\n for poll in polls:\n join_room(poll.id)", "async def poll(self) -> List[Message]:\n if not self._session:\n await self._create_session()\n \n res = await self._session.get(self._network.SERVER_ADDR + '/api/poll')\n obj = await res.json()\n self._network.connected_robots = obj['robots']\n ret = []\n for m in obj['messages']:\n ret.append(Message.from_dict(m))\n return ret", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... 
Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def startPolling(self):\n\n #Notify the GUI that we are polling\n self.applicationCallback(MessageTypes.MSG_CLIENT_WAITING, {} )\n\n keepPolling = True\n while(keepPolling):\n time.sleep(self.interval)\n postData = {\n 'client_id': self.clientId,\n 'client_secret': self.clientSecret,\n 'code': self.deviceCode,\n 'grant_type': self.grantType }\n postFields = urlencode(postData)\n\n buffer = BytesIO()\n c = pycurl.Curl()\n try:\n c.setopt(c.URL, self.pollServer)\n c.setopt(c.POSTFIELDS, postFields)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n\n responsecode = c.getinfo(c.RESPONSE_CODE)\n reqResp = json.loads(buffer.getvalue().decode('iso-8859-1'))\n except pycurl.error as err:\n msgData = { 'error_code': GDataOAuthError.ERR_NETWORK, 'error_string': c.errstr() }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n return\n finally:\n c.close()\n \n if(responsecode == 200):\n keepPolling = False\n expiration = int(time.time()) + int(reqResp['expires_in'])\n token = OAuth2Token(reqResp['refresh_token'], reqResp['token_type'], reqResp['access_token'], expiration)\n self.applicationCallback(MessageTypes.MSG_OAUTH_SUCCESS, token)\n elif(responsecode == 400):\n errorType = reqResp['error']\n #The google api has combined legit errors with the \"still waiting\" response. 
Need to decide if it's an error or to just try again\n if(errorType == \"authorization_pending\"):\n print(\"Still waiting...\")\n else:\n keepPolling = False\n msgData = { 'error_code': GDataOAuthError.ERR_PROTOCOL, 'error_string': reqResp['error'] + \": \" + reqResp['error_description']}\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n elif(responsecode == 403):\n keepPolling = False\n msgData = { 'error_code': GDataOAuthError.ERR_AUTH_FAILED, 'error_string': reqResp['error'] + \": User cancelled authorization\" }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)\n elif(responsecode == 429):\n #if we are going too fast. add 2 seconds to the interval\n print(\"Too fast, increasing interval..\")\n self.interval += 2\n else:\n keepPolling = False\n msgData = { 'error_code': GDataOAuthError.ERR_UNKNOWN, 'error_string': reqResp['error'] + \": \" + reqResp['error_description'] }\n self.applicationCallback(MessageTypes.MSG_OAUTH_FAILED, msgData)", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def listpolls(self, irc, msg, args, channel):\n if channel and msg.args[0] in irc.state.channels:\n if self.polls is None:\n self.polls = []\n if self.polls is []:\n irc.reply(\"No Polls.\")\n for idx, entry in enumerate(self.polls[channel]):\n entry_string = []\n question = entry['question']\n yays = entry['yays']\n nays = entry['nays']\n added_by = entry['added_by']\n # concluded = entry['concluded']\n entry_string.append(\"%d: %s\" % (idx, question))\n entry_string.append(\"Yes: %s\" % (' '.join(yays) if yays != [] else 'none'))\n entry_string.append(\"No: %s\" % (' '.join(nays) if nays != [] else 'none'))\n entry_string.append(\"Question asked by %s\" % added_by)\n irc.reply(' / '.join(entry_string), notice=True, private=True, prefixNick=False)\n\n else:\n try:\n if ircdb.checkCapability(msg.prefix, 'admin') or ircdb.checkCapability(msg.prefix, 'owner'):\n if self.polls is None:\n self.polls = []\n if self.polls is []:\n irc.reply(\"No Polls.\")\n for idx, entry in enumerate(self.polls[channel]):\n entry_string = []\n question = entry['question']\n yays = entry['yays']\n nays = entry['nays']\n added_by = entry['added_by']\n # concluded = entry['concluded']\n entry_string.append(\"%d: %s\" % (idx, question))\n entry_string.append(\"Yays: %s\" % (' '.join(yays) if yays != [] else 'none'))\n entry_string.append(\"Nays: %s\" % (' '.join(nays) if nays != [] else 'none'))\n entry_string.append(\"Question asked by %s\" % added_by)\n irc.reply(' / '.join(entry_string), notice=True, private=True, prefixNick=False)\n else:\n irc.errorInvalid('argument', channel)\n\n except KeyError:\n return", "async def _async_status_request(self) -> None:\n try:\n # status_response = await self._hass.async_add_executor_job(\n # self._mc_status.status, self._MAX_RETRIES_STATUS\n # )\n if self.access_token:\n if (time.time() - self.last_request) > 1800:\n phantom = await self._hass.async_add_executor_job(\n self._phantom_load\n )\n if phantom.status_code == HTTP_OK:\n 
self.phantom_load = round(phantom.json().get(\"power\") / 1000, 3)\n else:\n _LOGGER.warning(phantom.content)\n\n # Got answer to request, update properties.\n live = await self._hass.async_add_executor_job(self._live_data)\n\n if live.status_code == HTTP_OK:\n self.power_usage = round(abs(live.json().get(\"power\")) / 1000, 3)\n else:\n _LOGGER.warning(live.content)\n\n self.last_request = time.time()\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.power_usage = None\n self.phantom_load = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s' failed - OSError: %s\",\n self.unique_id,\n error,\n )\n self._last_status_request_failed = True", "def handler(event, context):\n log.debug('Running poller. Configuration: {}'.format(event))\n\n for account in get_historical_accounts():\n # Skip accounts that have role assumption errors:\n try:\n create_polling_event(account['id'], os.environ.get(\"HISTORICAL_STREAM\", \"HistoricalS3PollerStream\"))\n except ClientError as e:\n log.warning('Unable to generate events for account. AccountId: {account_id} Reason: {reason}'.format(\n account_id=account['id'],\n reason=e\n ))\n\n log.debug('Finished generating polling events. Events Created: {}'.format(len(account['id'])))", "async def run(self):\n while True:\n await asyncio.sleep(0)\n # See if any sockets have anything\n try:\n socks, events = self.poller.poll(1000)\n for sock, event in zip(socks,events):\n if sock in self.subscriptions:\n states = sock.recv_json()\n await self.main_server.sync_states(states)\n\n # Nothing to report - Poller did not find any sockets with updates\n except ValueError:\n pass\n # Exiting\n except KeyboardInterrupt:\n break", "def poll_and_save():\n\tusers = User.query.all()\n\tfor user in users:\n\t\tlogging.debug(\"polling for {}\".format(user))\n\t\t# API call to WEconnect activities-with-events\n\t\tactivity_events = weconnect.get_todays_events(user)\n\t\tlogging.debug(activity_events)\t\n\t\n\tfor activity in activity_events:\n\t\tfor ev in activity[\"events\"]:\n\t\t\tevent = session.query(Event).filter_by(eid == ev[\"eid\"]).first()\n\t\t\tif event:\n\t\t\t\t#update the completion\n\t\t\t\tevent.completed = (ev[\"didCheckin\"] == True)\n\t\t\telse: #eid doesn't exist, add new event\n\t\t\t\tnewEvent = weconnect.createNewEvent(ev)\n\t\t\t\tsession.add(newEvent)\n\ttry:\t\t\n\t\tsession.commit()\n\t\tprint(\"Received {} Activity events in last poll.\").format(len(activity_events))\n\texcept:\n\t\tsession.rollback()\n\t\tprint(\"Session Commit failed\")", "def _summarize_period(self):\n print(\"entering _summarize_period()\")\n for i in range(TIME_BETWEEN_FEEDBACK // SONG_OVER_CHECK_TIME): ### Wait 30 seconds\n self._check_completion()\n if self._song_over is True:\n break\n self._set_last_30_sec()\n return", "def poll(self, poll_timeout):\n self.latest_poll_time = current_time()\n\n callbacks, self._callbacks = self._callbacks[:], []\n\n for timer in callbacks:\n try:\n timer.function()\n except Exception:\n log.exception(\"Exception raised while executing timer.\")\n\n if timer.requeue:\n self._callbacks.append(timer)\n\n while self._deferreds and self._deferreds[0].end <= self.latest_poll_time:\n timer = self._deferreds.pop(0)\n\n try:\n timer.function()\n except Exception:\n log.exception(\"Exception raised while executing timer.\")\n\n if timer.requeue:\n timer.end = 
self.latest_poll_time + timer.delay\n bisect.insort(self._deferreds, timer)\n\n if self._shutdown:\n return\n\n if self._deferreds:\n timeout = self._deferreds[0].end - self.latest_poll_time\n if timeout > 0.0:\n poll_timeout = max(min(timeout, poll_timeout), 0.01)\n\n if not self._channels:\n time.sleep(poll_timeout) # Don't burn CPU.\n return\n\n try:\n events = self._poller.poll(poll_timeout)\n except Exception as err:\n if err.args[0] == errno.EINTR:\n log.debug(\"Interrupted system call.\")\n return\n else:\n raise\n\n for fileno, events in events.iteritems():\n channel = self._channels[fileno]\n try:\n channel._handle_events(events)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n log.exception(\"Error while handling events on %r.\" % channel)", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def poller(once=False, activities=None, sleep_time=60,\n fts_bulk=100, db_bulk=1000, older_than=60, activity_shares=None, partition_wait_time=10):\n\n try:\n timeout = config_get('conveyor', 'poll_timeout')\n timeout = float(timeout)\n except NoOptionError:\n timeout = None\n\n multi_vo = config_get_bool('common', 'multi_vo', False, None)\n logger_prefix = executable = 'conveyor-poller'\n if activities:\n activities.sort()\n executable += '--activities ' + str(activities)\n if activity_shares:\n activities.sort()\n executable += '--activity_shares' + str(activity_shares)\n if FILTER_TRANSFERTOOL:\n executable += ' --filter-transfertool ' + FILTER_TRANSFERTOOL\n\n with HeartbeatHandler(executable=executable, logger_prefix=logger_prefix) as heartbeat_handler:\n logger = heartbeat_handler.logger\n logger(logging.INFO, 'Poller starting - db_bulk (%i) fts_bulk (%i) timeout (%s)' % (db_bulk, fts_bulk, timeout))\n activity_next_exe_time = defaultdict(time.time)\n\n if partition_wait_time:\n graceful_stop.wait(partition_wait_time) # To prevent running on the same partition if all the poller restart at the same time\n while not graceful_stop.is_set():\n\n try:\n heart_beat, logger = heartbeat_handler.live(older_than=3600)\n if activities is None:\n activities = [None]\n for activity in activities:\n if activity_next_exe_time[activity] > time.time():\n graceful_stop.wait(1)\n continue\n\n start_time = time.time()\n logger(logging.DEBUG, 'Start to poll transfers older than %i seconds for activity %s using transfer tool: %s' % (older_than, activity, FILTER_TRANSFERTOOL))\n transfs = request_core.get_next(request_type=[RequestType.TRANSFER, 
RequestType.STAGEIN, RequestType.STAGEOUT],\n state=[RequestState.SUBMITTED],\n limit=db_bulk,\n older_than=datetime.datetime.utcnow() - datetime.timedelta(seconds=older_than) if older_than else None,\n total_workers=heart_beat['nr_threads'], worker_number=heart_beat['assign_thread'],\n mode_all=True, hash_variable='id',\n activity=activity,\n activity_shares=activity_shares,\n transfertool=FILTER_TRANSFERTOOL)\n\n record_timer('daemons.conveyor.poller.get_next', (time.time() - start_time) * 1000)\n\n if TRANSFER_TOOL and not FILTER_TRANSFERTOOL:\n # only keep transfers which don't have any transfertool set, or have one equal to TRANSFER_TOOL\n transfs_tmp = [t for t in transfs if not t['transfertool'] or t['transfertool'] == TRANSFER_TOOL]\n if len(transfs_tmp) != len(transfs):\n logger(logging.INFO, 'Skipping %i transfers because of missmatched transfertool', len(transfs) - len(transfs_tmp))\n transfs = transfs_tmp\n\n if transfs:\n logger(logging.DEBUG, 'Polling %i transfers for activity %s' % (len(transfs), activity))\n\n transfs.sort(key=lambda t: (t['external_host'] or '',\n t['scope'].vo if multi_vo else '',\n t['external_id'] or '',\n t['request_id'] or ''))\n for (external_host, vo), transfers_for_host in groupby(transfs, key=lambda t: (t['external_host'],\n t['scope'].vo if multi_vo else None)):\n transfers_by_eid = {}\n for external_id, xfers in groupby(transfers_for_host, key=lambda t: t['external_id']):\n transfers_by_eid[external_id] = list(xfers)\n\n for chunk in dict_chunks(transfers_by_eid, fts_bulk):\n try:\n poll_transfers(external_host=external_host, transfers_by_eid=chunk, vo=vo, timeout=timeout, logger=logger)\n except Exception:\n logger(logging.ERROR, 'Exception', exc_info=True)\n\n if len(transfs) < fts_bulk / 2:\n logger(logging.INFO, \"Only %s transfers for activity %s, which is less than half of the bulk %s, will sleep %s seconds\" % (len(transfs), activity, fts_bulk, sleep_time))\n if activity_next_exe_time[activity] < time.time():\n activity_next_exe_time[activity] = time.time() + sleep_time\n except Exception:\n logger(logging.CRITICAL, \"Exception\", exc_info=True)\n if once:\n raise\n\n if once:\n break", "def get_available_polls(game_type_id):\n\n poll_response = requests.get(\n url=f'{settings.GAME_SETUP_URL}/all-polls/{game_type_id}/',\n timeout=5 # in sec\n )\n if poll_response.status_code == 200:\n return poll_response.json()\n return {}", "def refresh(self):\n self._policies = self._get_policies()" ]
[ "0.54762065", "0.53788805", "0.53186905", "0.53170043", "0.5184612", "0.5109608", "0.5096446", "0.50746995", "0.50663227", "0.5019184", "0.5006015", "0.4936928", "0.4913336", "0.4876137", "0.48729563", "0.48675004", "0.4866862", "0.48607504", "0.48592043", "0.48447618", "0.48181924", "0.47783744", "0.47454023", "0.4743472", "0.4727668", "0.47262433", "0.47186005", "0.46756938", "0.4653509", "0.4626212" ]
0.8014664
0
Pulling new events from the event factory, collecting statistics and handling tasks
def handle_events(self): self._busy_mutext.acquire() try: event = self.EventsFactory.pull_event() while event: self.logger.debug('Handling new event: {}'.format(event.id)) event_endpoint_scope_classes = event.EndpointScope.get_static_hierarchy() stat_collection = [] for statistics_cls in self._statistics: if statistics_cls.EndpointScope in event_endpoint_scope_classes: statistics = statistics_cls.init_by_event(event) self.logger.debug(f'Collecting statistics: {statistics}') stat_collection.append(statistics) statistics.collect() self.logger.debug('Checking for tasks to run') for task_cls in self.get_conditional_tasks(): if task_cls.EndpointScope in event_endpoint_scope_classes: task_endpoint_scope_classes = task_cls.EndpointScope.get_static_hierarchy() statistics = [] for stats in stat_collection: if stats.Endpoint == task_cls.Endpoint and stats.EndpointScope in task_endpoint_scope_classes: statistics.append(stats) task = task_cls(event.EndpointScope.init_by_event(event), statistics, event) task.handle() event = self.EventsFactory.pull_event() finally: self._busy_mutext.release()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def collect_new_events(self) -> list:\n self.logger.debug('Collecting new events...')\n events = self.build_events()\n if not events:\n self.logger.debug('No new events.')\n for event in events:\n self.logger.info('A new event has been detected: {}'.format(event))\n self._buffer_buisy_mutex.acquire()\n self._events_buffer.append(event)\n self._buffer_buisy_mutex.release()", "def _get_events(self):\n self.cache = []\n\n # Test if we have event table\n with datascope.closing(datascope.dbopen(self.db, 'r')) as db:\n dbtable = db.lookup(table='event')\n if dbtable.query(datascope.dbTABLE_PRESENT):\n steps = ['dbopen event']\n steps.extend(['dbjoin origin'])\n steps.extend(['dbsubset origin.orid != NULL'])\n steps.extend(['dbsubset origin.orid == prefor'])\n fields = ['evid']\n else:\n steps = ['dbopen origin']\n steps.extend(['dbsubset orid != NULL'])\n fields = []\n\n fields.extend(['orid','time','lat','lon','depth','auth','nass',\n 'ndef','review'])\n\n for v in extract_from_db(self.db, steps, fields, self.db_subset):\n if not 'evid' in v:\n v['evid'] = v['orid']\n\n self.logging.debug( \"Events(): new event #%s\" % v['evid'] )\n\n v['allmags'] = []\n v['magnitude'] = '-'\n v['maglddate'] = 0\n v['srname'] = '-'\n v['grname'] = '-'\n v['time'] = parse_sta_time(v['time'])\n v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone)\n\n try:\n v['srname'] = stock.srname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with srname for orid %s: %s' % (v['orid'],\n v['lat'],v['lon'],e) )\n\n try:\n v['grname'] = stock.grname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with grname for orid %s: %s' % (v['orid'],\n v['lat'], v['lon'],e) )\n\n orid = v['orid']\n if orid in self.mags:\n for o in self.mags[orid]:\n v['allmags'].append(self.mags[orid][o])\n if self.mags[orid][o]['lddate'] > v['maglddate']:\n v['magnitude'] = self.mags[orid][o]['strmag']\n v['maglddate'] = self.mags[orid][o]['lddate']\n\n\n self.cache.append( v )", "def fetch_events(self):\n while 1:\n try:\n self.events_local.append(self._q.get(False))\n except queue.Empty:\n break", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n\n # TODO noitem found\n print(datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00'))\n\n nextStartTime = datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n if delta < 0:\n print(\"capture next\")\n nextStartTime = datetime.datetime.strptime(events[1]['start']['dateTime'], 
'%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n print(delta)\n\n if NOTIFY_THRESHOLD_SECOND > delta:\n alert_time_limit()\n else:\n set_normal()\n\n\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "def updateEvents(self):\n # Update calendar data\n d_start = datetime.datetime.today()\n d_end = d_start + datetime.timedelta(self.delta_days)\n results = self.cal_cal.date_search(d_start, d_end)\n\n # Flush the events dict\n self.events = []\n # Add each events\n for event in results:\n # Format the title of the event\n str_title = event.instance.vevent.summary.value\n if len(str_title) > 20:\n str_title = str_title[:17] + \"...\"\n # Format the date of the event\n vdate = event.instance.vevent.dtstart.value\n d = datetime.datetime.strptime(\n vdate.strftime(\"%d %m %Y\"), \"%d %m %Y\")\n str_date = \"%s %d %s\" % (\n self.days_french[d.weekday()],\n d.day,\n self.months_french[d.month -1])\n # Format the date gap\n gap = 1 + (d - d_start).days\n # Save the event\n self.events.append((str_title, str_date, gap))", "def collect_events(helper, ew):\n\n '''\n # The following example writes a random number as an event. (Multi Instance Mode)\n # Use this code template by default.\n import random\n data = str(random.randint(0,100))\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n '''\n\n '''\n # The following example writes a random number as an event for each input config. (Single Instance Mode)\n # For advanced users, if you want to create single instance mod input, please use this code template.\n # Also, you need to uncomment use_single_instance_mode() above.\n import random\n input_type = helper.get_input_type()\n for stanza_name in helper.get_input_stanza_names():\n data = str(random.randint(0,100))\n event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)\n ew.write_event(event)\n '''\n\n if helper.get_log_level() == \"DEBUG\":\n import traceback\n debug = True\n else:\n debug = False\n\n try:\n # Construct Workday client from the provided global config\n rest_api_endpoint = helper.get_global_setting(\"rest_api_endpoint\")\n token_endpoint = helper.get_global_setting(\"token_endpoint\")\n client_id = helper.get_global_setting(\"client_id\")\n client_secret = helper.get_global_setting(\"client_secret\")\n refresh_token = helper.get_global_setting(\"refresh_token\")\n\n empty_fields = []\n if not rest_api_endpoint:\n empty_fields.append(\"Workday REST API Endpoint\")\n if not token_endpoint:\n empty_fields.append(\"Token Endpoint\")\n if not client_id:\n empty_fields.append(\"Client ID\")\n if not client_secret:\n empty_fields.append(\"Client Secret\")\n if not refresh_token:\n empty_fields.append(\"Refresh Token\")\n if len(empty_fields) > 0:\n raise ValueError(\"Empty fields in global configuration: {}\".format(\", \".join(empty_fields)))\n\n wday = Workday(rest_api_endpoint, token_endpoint, client_id, client_secret, refresh_token, http_user_agent=USER_AGENT, helper=helper)\n except ValueError as e:\n helper.log_error(str(e))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n sys.exit(1)\n\n stanza_names = helper.get_input_stanza_names()\n if not isinstance(stanza_names, list):\n stanza_names = 
[stanza_names]\n\n for stanza_name in stanza_names:\n input_type = helper.get_input_type()\n input_name = helper.get_arg(\"input_name\")\n include_target = helper.get_arg(\"include_target\")\n\n index = helper.get_output_index(stanza_name)\n sourcetype = \"workday:{}\".format(input_name)\n\n if input_name == \"user_activity\":\n\n # Pull checkpoint value and setup query range for this run\n # Only pull up to 5 minutes in the past to allow time for events to be available in the report\n checkpoint_format = \"%Y-%m-%dT%H:%M:%SZ\"\n end = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)\n start = helper.get_check_point(input_name)\n if start is None:\n start = end\n helper.log_info(\"No timestamp checkpoint found for input \\\"{}\\\", starting from now ({})\".format(\n input_name,\n start.strftime(checkpoint_format)\n ))\n # Save current time now to preserve original start time in case of errors\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n # Confirm that the checkpoint is in the correct format\n try:\n start = datetime.datetime.strptime(start, checkpoint_format)\n except ValueError as e:\n helper.log_error(\"Invalid checkpoint value for input \\\"{}\\\", aborting ({})\".format(input_name, str(e)))\n continue\n\n\n helper.log_info(\"Starting input \\\"{}\\\" for window ({}, {})\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format)\n ))\n\n try:\n input_start = time.time()\n results = list(wday.audit_logs(start, end, include_target=include_target))\n\n except requests.exceptions.ConnectionError as e:\n helper.log_error(\"Unable to connect to host\")\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.Timeout as e:\n helper.log_error(\"Request timed out, retries exhausted\")\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.HTTPError as e:\n helper.log_error(\"Request failed with error code ({}), retries exhausted\".format(e.response.status_code))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except Exception as e:\n helper.log_error(\"Unknown exception occurred ({})\".format(str(e)))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n else:\n\n # Deliberately wait to write events until all are collected with no errors\n # otherwise errors or restarts could cause missing / duplicate events\n for result in results:\n event = helper.new_event(\n source = input_type,\n index = index,\n sourcetype = sourcetype,\n data = json.dumps(result)\n )\n ew.write_event(event)\n\n input_runtime = time.time() - input_start\n event_count = len(results)\n helper.log_info(\"Finished input \\\"{}\\\" for window ({}, {}) in {} seconds, {} events written\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format),\n round(input_runtime, 2),\n event_count\n ))\n\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n helper.log_warning(\"Invalid input \\\"{}\\\", supported values are \\\"{}\\\"\".format(input_name, \"|\".join(VALID_INPUTS)))", "def __calender_events(self):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n pt=\"Getting the upcoming latest events\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pt)\n eventsResult = 
service.events().list(\n calendarId='primary', timeMin=now, maxResults=1, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n pq=\"No upcoming events found.\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pq)\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n #start1=''.join(start)\n summary=event['summary']\n print start,summary\n requests.get(\"http://localhost:8080/statement?text=\"+start+\" \"+summary)", "def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)", "def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def test_streamNewEvents(self):\n events = (\n dict(eventID=u\"1\", eventText=u\"A\"),\n dict(eventID=u\"2\", eventText=u\"B\"),\n dict(eventID=u\"3\", eventText=u\"C\"),\n dict(eventID=u\"4\", eventText=u\"D\"),\n )\n\n resource = self.eventSourceResource()\n\n response = self.render(resource)\n\n # The first read should block on new events.\n d = response.stream.read()\n self.assertFalse(d.called)\n\n # Add some events\n resource.addEvents(events)\n\n # We should now be unblocked\n self.assertTrue(d.called)\n\n # Each result from read() is another event\n for i in range(len(events)):\n if d is None:\n result = yield response.stream.read()\n else:\n result = yield d\n d = None\n\n self.assertEquals(\n result,\n textAsEvent(\n text=events[i][\"eventText\"],\n eventID=(events[i][\"eventID\"])\n )\n )\n\n # The next read should block on new events.\n d = response.stream.read()\n self.assertFalse(d.called)\n\n d.addErrback(lambda f: None)\n d.cancel()", "def handleEvents(self, events):\n self.virtual_time = events[0].timestamp\n now = self.virtual_time\n\n # handle events based on type\n for evt in events:\n logging.debug('\\t Handle %s' % str(evt))\n if evt.evt_type == BBEventType.Submitted:\n self.scheduler.insertToInputQ(evt.job)\n elif evt.evt_type == BBEventType.FinishIn:\n self.scheduler.insertToRunQ(evt.job)\n elif evt.evt_type == BBEventType.ReleaseInBB:\n self.scheduler.releaseBB(evt.job.demand.data_in)\n elif evt.evt_type == BBEventType.FinishRun:\n self.scheduler.insertToOutputQ(evt.job)\n elif evt.evt_type == BBEventType.ReleaseRunCN:\n self.scheduler.releaseCN(evt.job.demand.num_core)\n elif evt.evt_type == BBEventType.FinishOut:\n self.scheduler.insertToCompleteQ(evt.job)\n else:\n logging.warn('\\t Unable to handle event %s' % str(evt))\n jobs = self.scheduler.schedule(now)\n if jobs:\n new_events = self.generator.generateEvents(jobs)\n for evt in new_events:\n self.event_q.append(evt)", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = 
ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n create_window_event(files=csv_files,\r\n input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def _default_events_fetcher(self):\n raise NotImplementedError", "def _default_events_fetcher(self):\n raise NotImplementedError", "def load_new_events_list(self):\n self._event_index_list = self.gdc.new_events_indices\n self.populate_event_list_from_index_list()", "async def events(self) -> Iterable[Event]:", "def _run_events(self):\n new_futures = set()\n while len(self.events) > 0:\n LOG.debug('processing events (%s remaining)', len(self.events))\n # Get next event\n event = self.events.popleft()\n LOG.debug('processing event: %s', event)\n # Handle the event\n for handler in self.get_handlers(event):\n # Attempt to run the handler, but don't break everything if the handler fails\n LOG.debug('running handler: %r', handler)\n future = self._run_handler(handler, event)\n if future:\n new_futures.add(future)\n self.new_events.clear()\n if len(new_futures) > 0:\n LOG.debug('got %s new futures', len(new_futures))\n return new_futures", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def resolve_events(self, args, context, info):\n params = {\n 'execution_id': self.id,\n }\n return EventLoader.get().load(params)", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print 
\"done.\"", "def handleEvents(self, events):\n pass", "def load_events():\n\n print('load_events')\n\n Event.query.delete()\n\n for row in open(\"seed_data/events.csv\"):\n row = row.rstrip()\n private, \\\n host_id, \\\n venue, \\\n title, \\\n time_begin, \\\n time_end, \\\n max_cap, \\\n url = row.split(',')\n\n private = int(private)\n host_id = int(host_id)\n\n ven = Venue.query.filter_by(name=venue).first()\n\n begin_at = datetime.strptime(time_begin, \"%y-%m-%d %H:%M:%S\")\n\n end_at = datetime.strptime(time_end, \"%y-%m-%d %H:%M:%S\")\n\n evt = Event(private=private,\n host_id=host_id,\n venue_id=ven.id,\n title=title,\n begin_at=begin_at,\n end_at=end_at,\n max_cap=max_cap,\n url=url)\n\n db.session.add(evt)\n\n db.session.commit()", "def create_new_event(self):\n pass", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])", "def __init__(self, events):\n for event in events:\n #do stuff\n pass", "def collect_events(helper, ew):\n\n opt_start_time_start = helper.get_arg('start_time_start')\n opt_endpoints = helper.get_arg('endpoints')\n opt_interval = int(helper.get_arg('interval'))\n opt_live = False\n\n proxy = helper.get_proxy()\n if proxy:\n proxy_auth = \"{}:{}\".format(\n proxy['proxy_username'], proxy['proxy_password'])\n proxies = {\n \"https\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port']),\n \"http\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port'])\n }\n else:\n proxies = None\n\n helper.log_debug(\n \"[-] webex password_type: {}\".format(helper.get_global_setting(\"password_type\")))\n\n params = {\"opt_username\": helper.get_global_setting(\"username\"),\n \"opt_password\": helper.get_global_setting(\"password\"),\n \"opt_site_name\": helper.get_global_setting(\"site_name\"),\n \"limit\": 500,\n \"timezone\": \"20\",\n # \"password_type\": authentication_type[\"Password Authentication\"],\n # \"password_type\": authentication_type[\"OAuth\"],\n \"password_type\": authentication_type[helper.get_global_setting(\"password_type\")],\n \"client_id\": helper.get_global_setting(\"client_id\"),\n \"client_secret\": helper.get_global_setting(\"client_secret\"),\n \"refresh_token\": helper.get_global_setting(\"refresh_token\"),\n \"proxies\": proxies}\n\n # Historical Data\n helper.log_debug(\"Historical Data\")\n for opt_endpoint in opt_endpoints:\n helper.log_debug(\"[-] 
\\t At {}\".format(opt_endpoint))\n\n # endtime is midnight of GMT - 3days\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(\n enddt, datetime.max.time()).strftime('%m/%d/%Y %H:%M:%S')\n\n # create checkpoint key for offest and timestamp\n timestamp_key = \"timestamp_{}_{}_processing\".format(\n helper.get_input_stanza_names(), opt_endpoint)\n\n start_time = helper.get_check_point(timestamp_key)\n if start_time is None:\n # if it's the 1st time, get the start_time from UI, and then save it in checkpoint\n start_time = opt_start_time_start\n helper.save_check_point(timestamp_key, start_time)\n else:\n # shift the start_time by 1 second\n start_time = (datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S') +\n timedelta(seconds=1)).strftime('%m/%d/%Y %H:%M:%S')\n\n helper.log_debug(\"Start time: {}\".format(start_time))\n helper.log_debug(\"End time: {}\".format(end_time))\n\n # Update Parameters\n params.update({\"mode\": \"historical\"})\n params.update({\"opt_endpoint\": opt_endpoint})\n params.update({\"start_time\": start_time})\n params.update({\"end_time\": end_time})\n params.update({\"timestamp_key\": timestamp_key})\n\n records = params['limit']\n offset = 1\n while (records == params['limit']):\n helper.log_debug(\"current_offset: {}\".format(offset))\n params['offset'] = offset\n records = fetch_webex_logs(ew, helper, params)\n helper.log_debug(\"\\t Offet:{}\\tLimit: {}\\tRecords Returned: {}\".format(\n offset, params['limit'], records))\n if records:\n offset += records" ]
[ "0.6928928", "0.66840243", "0.65027535", "0.64619464", "0.6460602", "0.6405665", "0.635793", "0.6357241", "0.6314258", "0.62627983", "0.62376845", "0.6235711", "0.62312156", "0.62292194", "0.6201306", "0.61748487", "0.61748487", "0.61693776", "0.61435163", "0.61425567", "0.6127309", "0.6127309", "0.6102173", "0.6099917", "0.60848475", "0.6074446", "0.6057109", "0.6045342", "0.6044003", "0.60387164" ]
0.75559413
0
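The handle_events document above follows a common pull-and-dispatch pattern: take a lock, drain the event source, and run every registered handler for each event. Below is a stripped-down, self-contained sketch of that pattern; all class and method names here are illustrative stand-ins, not the original project's API.

```python
import threading
from queue import Queue, Empty

class MiniEventHandler:
    def __init__(self):
        self._events = Queue()
        self._busy = threading.Lock()     # mirrors the busy mutex acquire/release above
        self._handlers = []               # callables taking one event each

    def register(self, handler):
        self._handlers.append(handler)

    def push(self, event):
        self._events.put(event)

    def handle_events(self):
        with self._busy:                  # only one thread drains the queue at a time
            while True:
                try:
                    event = self._events.get_nowait()
                except Empty:
                    break                 # queue empty: stop pulling
                for handler in self._handlers:
                    handler(event)        # e.g. collect statistics, then run tasks

if __name__ == "__main__":
    h = MiniEventHandler()
    h.register(lambda e: print("collect statistics for", e))
    h.register(lambda e: print("run task for", e))
    h.push("event-1")
    h.push("event-2")
    h.handle_events()
```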
Return feature complexity value
def complexity(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_complexity(self):\n if self.layer_type() == nn.Conv2d:\n return pow(self.layer_type.get_sub_value(\"conv_window_size\"), 2) * self.layer_type.get_sub_value(\n \"out_features\")\n elif self.layer_type() == nn.Linear:\n return self.layer_type.get_sub_value(\"out_features\")\n else:\n raise Exception()", "def complexity(self) -> str:\n return pulumi.get(self, \"complexity\")", "def complexity(model):\n size = cfg.TRAIN.IM_SIZE\n cx = {\"h\": size, \"w\": size, \"flops\": 0, \"params\": 0, \"acts\": 0}\n cx = model.complexity(cx)\n return {\"flops\": cx[\"flops\"], \"params\": cx[\"params\"], \"acts\": cx[\"acts\"]}", "def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)", "def cost(self) -> float:", "def agility(self):\n return self._getAttribute(Attribute.agility)", "def complexity(self):\n from kinbaku._pygenie import gettype, getpath\n from kinbaku.python import Dotpath\n if not self.contents:\n return\n try:\n stats = measure_complexity(self.contents, self.fname)\n except SyntaxError:\n return None\n\n out = PrettyPrinter(StringIO()).flatten_stats(stats)\n out = [ [ gettype(_type),\n getpath(_type,dotpath),\n score ] for _type, dotpath, score in out ]\n return out", "def _entropy(self, feature, node):\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy", "def _calculate_complexity(workflow):\n complexity = estimate_complexity(workflow.type_, workflow.reana_specification)\n workflow.complexity = complexity\n Session.commit()\n return complexity", "def functionality(self):\n self._functionality = 0.12 * self.CAMC + 0.22 * self.NOP + 0.22 * self.CIS + 0.22 * self.DSC + 0.22 * self.NOH\n return round(self._functionality, 5)", "def fitness(self):\n # TO BE DECIDED\n return 1", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n return features", "def feature():\n pass", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n features['successorScore'] = self.getScore(successor)\n\n return features", "def commands_complexity():\n complexity()", "def 
length(x):\r\n return Feature(x, \"length\")", "def features(self, state):\n jdecays = state[\"decays\"]\n cor_mean = state[\"means\"] / (1 - jdecays**(state[\"iteration\"]))\n # longest running decay\n approx_max = cor_mean[1:]\n cor_mean = cor_mean[0:-1]\n running_min = state[\"running_min\"][0:-1]\n\n den = jnp.maximum(1e-8, (approx_max - running_min))\n pre_center = (cor_mean - running_min) / den\n feature1 = (pre_center - 1.0)\n feature1 = jnp.clip(feature1, -1, 1)\n # first couple features are bad.\n return jnp.where(state[\"iteration\"] <= 2, feature1 * 0, feature1)", "def getFeatures(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState)\n features['successorScore'] = self.getScore(successor)\n return features", "def calcFeatureDescr(covarianceMatrix):\n D, V = scplinag.eig(covarianceMatrix)\n # We sort the array with eigenvalues by size (from smallest to largest value)\n D.sort()\n # Get eigenvectors\n e1 = V[2] # eigenvector in direction of largest variance\n e2 = V[1] # second eigenvector, perpend. to e1\n e3 = V[0]\n # Find the eigenvalues\n evalue1 = D[2] # largest\n evalue2 = D[1]\n evalue3 = D[0] # smallest\n\n # Linearity\n lambda1 = (evalue1 - evalue2) / evalue1\n # Planarity\n lambda2 = (evalue2 - evalue3) / evalue1\n # Scattering\n lambda3 = evalue3 / evalue1\n # Omnivariance\n misc1 = np.prod(D)\n lambda4 = pow(misc1,(1.0/3))\n # Anisotropy\n lambda5 = (evalue1 - evalue3) / evalue1\n # Eigentropy\n s = 0\n count = 0\n for elem in D:\n if elem == 0:\n s = 0\n count = 1\n else:\n # Only if bigger than 0\n misc2 = (elem*np.log(elem))\n if misc2 == 0:\n print \"Multiplication result too close to zero.\"\n s = 0\n else:\n s = s + misc2\n lambda6 = (-1)*s \n # Sum of eigenvalues\n lambda7 = sum(D)\n # Change of curvature\n lambda8 = evalue3/sum(D)\n \n featureDescriptor = np.array([lambda1, lambda2, lambda3, lambda4, lambda5, lambda6, lambda7, lambda8])\n return featureDescriptor, count", "def value_head(features):\n with tf.variable_scope('critic', reuse=tf.AUTO_REUSE):\n features = tf.layers.dense(features, units=1, activation=None, name='output')\n return tf.squeeze(features, axis=-1)", "def test_cognitive_complexity(\n get_code_snippet_complexity,\n mode,\n code,\n complexity,\n):\n assert get_code_snippet_complexity(mode(code)) == complexity", "def get_agility(self):\n return self.__agility", "def get_entropy_feature(self, feature, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n target_variables = df[target].unique()\n variables = df[feature].unique()\n entropy = 0\n\n # Aggregate entropy for each unique value in 'feature' feature on each unique value in target feature\n for variable in variables:\n entropy_inner = 0\n for target_variable in target_variables:\n # Number of values of 'variable' in 'feature' feature that matches current target value\n num = len(df[feature][df[feature] == variable][df[target] == target_variable])\n # Number of values of 'variable' in 'feature' feature\n den = len(df[feature][df[feature] == variable])\n # Machine epsilon\n eps = np.finfo(np.float).eps\n fraction_inner = num/(den+eps)\n entropy_inner += -fraction_inner*np.log(fraction_inner+eps)\n fraction = den/len(df)\n entropy += -fraction*entropy_inner\n\n return abs(entropy)", "def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import 
math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain", "def getFeatures(self, gameState, action):\n # features = util.Counter()\n # successor = self.getSuccessor(gameState, action)\n # features['successorScore'] = self.getScore(successor)\n # return features\n if self.isOffensive:\n return self.getOffensiveFeatures(gameState, action)\n else:\n return self.getDefensiveFeatures(gameState, action)", "def compute_cost(self, chrome):\n return 1" ]
[ "0.6790964", "0.66218597", "0.641455", "0.6044887", "0.60209787", "0.5925947", "0.591959", "0.5895237", "0.58917314", "0.5765962", "0.56985533", "0.567084", "0.567084", "0.567084", "0.567084", "0.567084", "0.565596", "0.564267", "0.56411445", "0.5639347", "0.5630512", "0.56282103", "0.5624736", "0.5623647", "0.56235474", "0.5609101", "0.5604291", "0.55956507", "0.55951977", "0.55722344" ]
0.7181252
0
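The document above is only an abstract stub (complexity() raises NotImplementedError). For illustration, here is a sketch of how concrete feature classes might fill it in, loosely following the convention used in the first negative (convolution: window_size squared times output features, linear: output features); all names below are assumptions, not the original code.

```python
class ConvFeature:
    def __init__(self, kernel_size, out_channels):
        self.kernel_size = kernel_size
        self.out_channels = out_channels

    def complexity(self):
        # convolution cost grows with the squared window size times output channels
        return self.kernel_size ** 2 * self.out_channels

class LinearFeature:
    def __init__(self, out_features):
        self.out_features = out_features

    def complexity(self):
        # a linear layer's complexity is taken as its number of output features
        return self.out_features

print(ConvFeature(kernel_size=3, out_channels=64).complexity())  # 576
print(LinearFeature(out_features=128).complexity())              # 128
```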
r"""compute amplitude phase error compute amplitude phase error of two complex valued matrix
def ampphaerror(orig, reco): amp_orig = np.abs(orig) amp_reco = np.abs(reco) pha_orig = np.angle(orig) pha_reco = np.angle(reco) # print(np.abs(amp_orig - amp_reco)) # print(np.abs(pha_orig - pha_reco)) # print(np.mean(np.abs(amp_orig - amp_reco))) # print(np.mean(np.abs(pha_orig - pha_reco))) amperror = np.mean(np.abs(amp_orig - amp_reco)) phaerror = np.mean(np.abs(pha_orig - pha_reco)) return amperror, phaerror
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_phase_amplitude_damping_error_noncanonical(self):\n error = phase_amplitude_damping_error(0.25, 0.5, 0.3, canonical_kraus=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n self.assertEqual(len(circ[0]['params']), 6,\n msg=\"Incorrect number of kraus matrices\")", "def test_phase_amplitude_damping_error_canonical(self):\n error = phase_amplitude_damping_error(0.25, 0.5, 0.3, canonical_kraus=True)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n self.assertEqual(len(circ[0]['params']), 4,\n msg=\"Incorrect number of kraus matrices\")", "def phaseangle(complexr):\n return numpy.arctan2(complexr.imag,complexr.real)", "def _amp_ ( self , x ) :\n v = self.amplitude ( x )\n #\n return complex( v.real () , v.imag () )", "def fringes_morlet_phase(m1,m2, quasi_pi=False):\n ### cross spectrum\n cross_spec = np.conj(m1.cwt)*m2.cwt\n phi = np.angle(cross_spec)\n if quasi_pi:\n phi = np.mod(phi + np.pi/2, 2*np.pi)\n weight = abs(m1.cwt)*abs(m2.cwt)\n phase = np.sum(phi*weight, axis=0)/np.sum(weight, axis=0)\n if quasi_pi:\n phase -= np.pi/2\n return phase", "def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase", "def mean_phase_coherence(phase: np.ndarray, amplitude: np.ndarray) -> float:\n z = amplitude * np.exp(1.0j*phase)\n return np.abs(np.mean(z))", "def envelope(signal2): \n analytic_signal = hilbert(signal2)\n amplitude_envelope = np.abs(analytic_signal) \n return amplitude_envelope", "def cphase(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n ret = (h1*h2).O.array()\n for (j, a) in enumerate(h1.index_iter()):\n for (k, b) in enumerate(h2.index_iter()):\n ret[{ h1: a, h1.H: a, h2: b, h2.H: b }] = field.fractional_phase(j*k, d)\n return ret", "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def error_metric(phi_1, phi_2, spherical=False, xpts=None):\n if spherical:\n return sum(abs(phi_1-phi_2)*(xpts**2))/(2.0*sum(abs(phi_1)*(xpts**2)))\n else:\n return sum(abs(phi_1-phi_2))/(2.0*sum(phi_1))", "def phase_dist(phi1,phi2=None):\n shape = phi1.shape\n \n if phi2 is None:\n dist = np.abs(phi1).ravel()\n else:\n dist = np.abs(phi1-phi2).ravel()\n dist[dist>np.pi] = np.pi - dist[dist>np.pi]%np.pi\n return dist.reshape(shape)", "def test_fold_along_delay_amplitude_check_complex():\n delays = np.arange(-10, 11) * units.s\n array = np.ones((1, 10, 21)) * (1 + 1j) * units.mK**2 * units.Mpc**3\n array[:, :, 11:].real *= np.sqrt(2)\n array[:, :, 10].real *= 5\n array[:, :, 11:].imag *= np.sqrt(3)\n array[:, :, 10].imag *= 6\n axis = -1\n errs = np.ones_like(array)\n array_out, errs_out = utils.fold_along_delay(delays, array, errs, axis=axis)\n test_value_array = np.ones((1, 10, 11)).astype(np.complex64)\n test_value_array[:, :, 1:] *= np.mean([np.sqrt(2), 1]) + 1j * np.mean(\n [np.sqrt(3), 1]\n )\n test_value_array[:, :, 0] = 5 + 6j\n assert 
np.allclose(test_value_array, array_out.value)", "def make_phase(mag, omega, phi, samples, end_time):\n\n array_time = np.linspace(0, end_time, samples)\n\n x = omega * array_time + phi\n\n return to_complex(mag, x), array_time", "def relative_phase_error(x_pred, x_val):\n ref_crossings = zero_crossings(x_val[:, 0])\n pred_crossings = zero_crossings(x_pred[:, 0])\n t_ref = np.mean(np.diff(ref_crossings)) * 2\n t_pred = np.mean(np.diff(pred_crossings)) * 2\n phase_error = t_ref/t_pred - 1\n if len(pred_crossings) < len(ref_crossings) - 2:\n phase_error = np.nan\n return phase_error", "def phase(freqs, p0, p1, p2):\n x = utils.reduce_by_midpoint(freqs)\n phi = p0 + p1 * x + p2 * x ** 2\n return np.exp(1j * phi)", "def app_phase(data_pupil,data_phase,oversize=4):\n return phaseangle(app_complex(data_pupil,data_phase,oversize))", "def find_error(p_s, p_t, A_d,\n A, b):\n def T(x):\n return(A.dot(x) + b)\n\n# TODO: add in w_j here\n second_sum = np.array([np.sqrt(np.linalg.norm(T(p_s[i]) - p_t[i]))\n for i in A_d])\n #error = second_sum.sum() / len(A_d)\n# TODO: the below is temprorary!! Need to figure out something not a hack!!\n# the 1/det(A) is to prevent us from pushing A towards zero\n error = second_sum.sum() / len(A_d) + 1 / np.linalg.det(A) + np.linalg.det(A)\n return(error)", "def eval_hamiltonian(num_atoms, h_poly, (phase0, phase1)):\n # print \"phase=\",(phase0, phase1)\n h = numpy.zeros((num_atoms, num_atoms),\n dtype = numpy.complex64)\n\n for (exp0, exp1) in h_poly:\n # print phase0, phase1, exp0, exp1\n h += h_poly[(exp0, exp1)] * phase0**exp0 * phase1**exp1\n\n return h", "def two_boson_amplitude(matrix, input_mal, output_mal):\n sub = matrix[np.ix_(input_mal, output_mal)]\n out = sub[0, 0] * sub[1, 1] + sub[0, 1] * sub[1, 0]\n mu_factor = _compute_mu_factor(\n _mal_to_mol(input_mal), _mal_to_mol(output_mal))\n return out / mu_factor", "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def app(data_pupil,data_phase,oversize=4):\n complexr=app_complex(data_pupil,data_phase,oversize)\n amp=(abs(complexr)**2)\n return amp", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def phase_locking_value(z1, z2):\n\n assert len(z1) == len(z2), \"Signals must be same length! 
len(z1)=%d, len(z2)=%d\" % (len(z1), len(z2))\n N = len(z1)\n theta = np.angle(z2) - np.angle(z1)\n\n p = np.exp(complex(0, 1)*theta)\n plv = np.abs(p.sum()) / N\n\n return plv", "def exp(q):\n normv = amplitude(q[:,:3])\n res = np.zeros_like(q)\n res[:,3:] = np.exp(q[:,3:]) * np.cos(normv)\n res[:,:3] = np.exp(q[:,3:]) * q[:,:3] / normv \n res[:,:3] *= np.sin(normv)\n return res", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))", "def real_imag_to_mag_phase(real_imag):\n return np.abs(real_imag), np.angle(real_imag, deg=True)", "def compose_spectra(amplitude, phase, duplicate=True, **kwargs):\n if duplicate:\n amplitude = deepcopy(amplitude)\n phase = deepcopy(phase)\n s = amplitude * np.exp(np.array([1.j]) * phase)\n return s" ]
[ "0.57301235", "0.5672069", "0.5634887", "0.55991733", "0.5460378", "0.5439556", "0.5384441", "0.53555703", "0.53211105", "0.53139055", "0.53101045", "0.5309513", "0.5285496", "0.5270891", "0.5257104", "0.5252667", "0.5211097", "0.5206943", "0.52064764", "0.519271", "0.5184854", "0.5184649", "0.51529115", "0.5139593", "0.5118987", "0.51185954", "0.5081803", "0.50800335", "0.5079135", "0.50607926" ]
0.63241947
0
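A minimal usage sketch for the ampphaerror document above (requires numpy; the essential function body is reproduced here so the snippet runs on its own):

```python
import numpy as np

def ampphaerror(orig, reco):
    # mean absolute difference of magnitudes and of phase angles
    amperror = np.mean(np.abs(np.abs(orig) - np.abs(reco)))
    phaerror = np.mean(np.abs(np.angle(orig) - np.angle(reco)))
    return amperror, phaerror

orig = np.array([[1 + 1j, 2 - 1j], [0.5j, -1 + 0.5j]])
reco = orig * 1.02 * np.exp(1j * 0.05)   # 2% amplitude and 0.05 rad phase perturbation

amp_err, pha_err = ampphaerror(orig, reco)
print(amp_err, pha_err)   # phase error is 0.05; amplitude error is 0.02 * mean(|orig|)
```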
Print 6 to the power of 3
def six_cubed(): print(math.pow(6, 3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def six_cubed():\n print(math.pow(6,3))", "def print_pow():\n a = get_inp_pow()\n n = get_inp_pow('power')\n print(a, \"^\", n, \" = \", pow(a, n), sep='')", "def print_power(x):\r\n if(type(x)!=int):\r\n if(power(x)==1 or power(x)==0):\r\n print(calc_power(x))\r\n else:\r\n print(\"{0}^{1}\".format(base(x),power(x)))\r\n else:\r\n print(x)", "def main():\n print_powers_of()\n print_even_powers_of_in_reverse()", "def myfunc(num):\n print(num**2 - 3*num + 7)", "def print_powers_of():\n base = int(input(\"Please enter a positive integer to serve as the base: \"))\n power = int(input(\"Please enter a positive integer to serve as the highest power: \"))\n if base >= 0 and power >= 0:\n for x in range(0, power + 1, 1):\n result = base ** x\n print(str(base) + \" ^ \" + str(x) + \" = \" + str(result))\n else:\n print(\"ERROR: Both values must be POSITIVE INTEGERS.\")", "def MyPower2(self):\n print ('%s' % power + \" : \" + name + \"'s power sucks it twice! D:\")", "def cube(n):\n return n**3", "def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p", "def cube(num):\n return num ** 3", "def power_list():", "def test_power_simple(self):\r\n self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}')", "def MyPower1(self):\n print ('%s' % money + \" : \" + name + \"'s power sucks it! D:\")", "def calculateCrypt(asci: int, e: int, n: int) -> int:\n return pow(int(asci),e,n)", "def print_even_powers_of_in_reverse():\n base = int(input(\"Please enter a positive integer to serve as the base: \"))\n power = int(input(\"Please enter a positive integer to serve as the highest power: \"))\n if base >= 0 and power >= 0:\n if power % 2 == 1:\n power -= 1\n for x in range(power, -1, -2):\n if x >= 0:\n result = base ** x\n print(str(base) + \" ^ \" + str(x) + \" = \" + str(result))\n else:\n print(\"ERROR: Both values must be POSITIVE INTEGERS.\")", "def multiply(num):\n print(int(num * 4))", "def TL_power(self,power):\n self.write(self.headStr('TL')+'TPDB %d',power)", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def print_factorial():\n n = get_inp_factorial()\n print(n, \"! = \", factorial(n), sep='')", "def __ipow__(self,n):\r\n\t\t\r\n\t\treturn self.power(n)", "def main():\n result = 0\n for n in range(1, 1001):\n result += n**n\n\n result = str(result)\n answer = result[len(result)-10::]\n\n print \"answer: \" + answer", "def fatorial(n, show=False):\n f = 1\n for c in range(n, 0, -1):\n if show:\n if c >1:\n print(f'{c} x ', end='')\n else:\n print(' = ', end='')\n f *= c\n return f", "def powerize(n, p):\n return sum(int(d)**p for d in str(n))", "def test_power_parens(self):\r\n self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')", "def multtable(n):\n for i in range(1,n+1):\n for x in range(1, n+1):\n result = print(i * x, end =' ')\n print ()\n return result", "def cube(x):\n return x ** 3", "def cube(x):\n return x ** 3" ]
[ "0.80794406", "0.739419", "0.66700226", "0.6589733", "0.6485324", "0.638416", "0.6293852", "0.62722826", "0.61774296", "0.61550194", "0.61198187", "0.59969866", "0.5990775", "0.59640026", "0.59278244", "0.59264386", "0.59198284", "0.5873405", "0.5873405", "0.5857948", "0.5857948", "0.5848499", "0.5839858", "0.5827895", "0.578962", "0.57890266", "0.5765331", "0.57578653", "0.57136166", "0.57136166" ]
0.80480736
1
Print the hypotenuse of straight angled triangle with 3 and 5 rib lengths
def hypotenuse(): print(math.sqrt(5*5 + 3*3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def triangular_area():\n print(1*1/2, 2*2/2, 3*3/2, 4*4/2, 5*5/2, 6*6/2, 7*7/2, 8*8/2, 9*9/2,\n 10*10/2)", "def hypotenuse():\n print(math.sqrt(5**2 +3**2))", "def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))", "def triangle_shape(height):\n mot = str()\n if height == 0:\n return str()\n else :\n for i in range (height):\n esp = height-1-i\n mot = mot+ esp*\" \" + (2*i+1)*\"x\" +esp*\" \"\n if i!=height-1:\n mot = mot+ \"\\n\"\n return(mot)", "def ex1() :\r\n print(\" - The right angled triangle - \")\r\n width = float(input(\"Enter the width of the triangle: \"))\r\n height = float(input(\"Enter the height of the triangle: \"))\r\n if width < 0 or height < 0 :\r\n print(\"Value entered can't be less than zero.\")\r\n else :\r\n hypo = (width * width) + (height * height) #hypotenuse calculation\r\n print(\"The hypotenuse of the triangle is: \", round(sqrt(hypo), 1)) #up to 1 decimal figures\r\n angleA = degrees(atan(height / width)) #calculating angleA with the functions degrees and atan\r\n angleB = 90 - angleA #we know one angle is 90 degrees, the other one we just calculated, so subtract those to get the 3rd angle\r\n print(\"The angles of the triangle are the following: 90 degrees, \",\r\n round(angleA,1), \" degrees, and \", round(angleB, 1), \" degrees.\")", "def triangle(t, s, l, l2):\n\tan = 360/s\n\tang = (180 - an)/2\n\tfor i in range (3):\n\t\tif i%2 == 0:\n\t\t\tfd(t, l2)\n\t\telse:\n\t\t\tfd(t, l)\n\t\tlt(t, 180 - ang)\n\tlt (t, ang)", "def triangle(height):\n for row in range(height):\n for column in range(1,row+2):\n print(CHAR, end = '')\n print()", "def triangle(self, freq: int, /) -> None:", "def triangle(halfSideLength = 15, robotHeight = -90):\n# ^ \n# / \\ \n# / \\ \n# / \\ \n# /_______\\\n# \n# | a | \n# a = halfSideLength\n\n hHalf = (halfSideLength * m.sqrt(3)/2)/2\n\n posTriangle = [\n [-hHalf,halfSideLength,robotHeight,0,0,0,'mov'],\n [-hHalf,-halfSideLength,robotHeight,0,0,0,'lin'],\n [hHalf,0,robotHeight,0,0,0,'lin'],\n [-hHalf,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posTriangle", "def aireTriangle(b, h):\n return (b * h) / 2", "def triangle(t):\n if int(t) % 2 == 0:\n y = t - int(t)\n else:\n y = 2 - (t - int(t) + 1)\n return abs(y)", "def triangle(num):\n a = list(numbers(num))\n m = len(' '.join(map(str, a[-1])))\n for x in a:\n print(' '.join(map(str, x)).center(m)+'\\n')", "def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)", "def triangle(length=40.0, r=3.175 / 2):\n\t# equilateral triangle:\n\ta = np.array([0, 0])\n\tb = np.array([length, 0])\n\tc = np.array([length / 2, length * math.sin(math.pi / 3)])\n\ttri_pts = PolyLine([a, b, c, a])\n\toffs_pts = addOffset(tri_pts, r)\n\ttri_pts = centerObjects(offs_pts, tri_pts)\n\treturn tri_pts, offs_pts", "def for_isosceles_triangle():\r\n\r\n for row in range(7):\r\n print(' '*(7-row), '* '*row)", "def area_triangle(w, h):\n return w * h / 2", "def print_triangles(triangle_list):\n triangles.sort(key=Triangle.get_square, reverse=True)\n result = \"==========Triangles list:==========\"\n for tr in range(len(triangle_list)):\n result += (\"\\n\" + str(tr + 1) +\n \". 
[Triangle {}]: {} cm\".format(triangles[tr].name,\n triangles[tr].square))\n return result", "def triangle(n):\n return n*(n+1)/2", "def triangle(n):\n return (n * (n + 1)) / 2", "def draw_triangle(tup):\n x, y, z = tup[0], tup[1], tup[2]\n t_draw = turtle.Turtle()\n for index in range(3):\n t_draw.forward()", "def triArea(base,height):\n return base * height /2", "def _triangleSymbol(angle):\n s = QtGui.QPainterPath()\n s.moveTo(0.5 * np.cos(-angle), 0.5 * np.sin(-angle))\n s.lineTo(0.5 * np.cos(-angle + 0.75 * np.pi),\n 0.5 * np.sin(-angle + 0.75 * np.pi))\n s.lineTo(0.5 * np.cos(-angle - 0.75 * np.pi),\n 0.5 * np.sin(-angle - 0.75 * np.pi))\n s.closeSubpath()\n return s", "def print_triangular_numbers(n):\r\n\r\n\tfor i in range(1, n+1):\r\n\t\tsum = int((i / 2)*(1 + i))\r\n\t\tprint(i, \"\\t\", sum)", "def draw_triangle(x, y, length=10):\n radius = length/math.sqrt(3)\n my_turtle.penup()\n my_turtle.goto(x, y+radius)\n my_turtle.pendown()\n my_turtle.right(60)\n for i in range(3):\n my_turtle.forward(length)\n my_turtle.right(120)\n\n my_turtle.left(60)\n my_turtle.penup()", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)", "def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )", "def triangle(n: int) -> int:\n return int(n * (n + 1) / 2)", "def trapezoid_area(lower, leg , upper):\n area = (((upper+lower)/2)*leg)\n return area", "def while_isosceles_triangle():\r\n row = 0\r\n while row<7:\r\n print(' '*(7-row), '* '*row)\r\n row += 1" ]
[ "0.716252", "0.69258684", "0.68606097", "0.68530416", "0.6835748", "0.67376614", "0.66448975", "0.66325754", "0.65477556", "0.65080655", "0.6503776", "0.64483434", "0.64430314", "0.6440897", "0.6418103", "0.6221835", "0.6206969", "0.6188036", "0.6169473", "0.6046872", "0.6028476", "0.6021882", "0.5999444", "0.59967345", "0.5980214", "0.597689", "0.59318024", "0.5930894", "0.59267855", "0.5914845" ]
0.70707786
1
Removes the citations that consist of a pair of brackets having a substring containing at least one digit inside them.
def remove_citations(text: str) -> str: text = re.sub("\[[a-zA-Z]\]", "", text) return re.sub(r"\[(\s|\w)*\d+(\s|\w)*\]", "", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_between_square_brackets(text):\n return re.sub('\\[[^]]*\\]', '', text)", "def clean_all_brackets(text):\n if \"[\" in text and \"]\" in text:\n text = delete_first_brackets(text)\n return clean_all_brackets(text)\n else:\n return text", "def clean_newick_string(self, newick_str):\n str_buff = []\n final_bracket, cur_len = 0, 0\n for data in self.separate_square_comments(newick_str):\n if data[0] != '[':\n clean_data = ''.join(data.split())\n str_buff.append(clean_data)\n brck_ind = clean_data.rfind(')')\n if brck_ind != -1:\n final_bracket = cur_len + brck_ind\n cur_len += len(clean_data)\n else:\n str_buff.append(data)\n cur_len += len(data)\n return ''.join(str_buff), final_bracket", "def stripBrackets(b):\n\n while b.startswith(b\"> \"):\n b = b[2:]\n return b", "def strip_all_unbalanced_parens(s):\n c = strip_unbalanced_parens(s, '()')\n c = strip_unbalanced_parens(c, '<>')\n c = strip_unbalanced_parens(c, '[]')\n c = strip_unbalanced_parens(c, '{}')\n return c", "def strip_brackets(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('(', ')'), ('[', ']'), ('[', ']'), ('{', '}'), ('<', '>'),\n\t\t\t\t(Chars.lshell, Chars.rshell), (Chars.langle, Chars.rangle),\n\t\t\t\t(Chars.ldparen, Chars.rdparen), (Chars.ldbracket, Chars.rdbracket), (Chars.ldangle, Chars.rdangle), (Chars.ldshell, Chars.rdshell)\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)", "def removeLabels(str2: str):\n str2_arr = []\n last_seen_bracket = []\n for char in str2:\n if char == \"(\" or char == \"[\":\n last_seen_bracket.append(char)\n str2_arr.append(\"-\")\n elif char == \")\" or char == \"]\":\n if len(last_seen_bracket) >= 1:\n last_seen_bracket.pop()\n else:\n continue\n elif char == \"-\" or char == '$':\n continue\n elif len(last_seen_bracket) >= 1:\n continue\n else:\n str2_arr.append(char)\n\n if len(str2_arr) > 1:\n for i in range(len(str2_arr)):\n try:\n if str2_arr[i] == \"-\" and str2_arr[i - 1] == \"-\":\n str2_arr.pop(i - 1)\n # Some segments have dual purpose, so this removes dual dashes that result from this\n except IndexError:\n continue\n\n if str2_arr[len(str2_arr) - 1] == \"\\n\":\n str2_arr.pop()\n\n return \"\".join(str2_arr).rstrip(\"-\").lstrip(\"-\")", "def removeParentheses(text):\n\t#print text\n\tno_parentheses = re.sub(r'\\s?\\([^)]*\\)', '', text)\n\treturn no_parentheses", "def removeOuterParentheses(self, S):\n _open, _close = \"(\", \")\"\n oc, cc = 0, 0\n part, res = \"\", \"\"\n\n for i, p in enumerate(S):\n if p == _open:\n oc += 1\n elif p == _close:\n cc += 1\n\n part += p\n\n if oc == cc:\n res += part[1:-1]\n part = \"\"\n\n return res", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def filter_references(tokens):\n toks = [token for token in tokens if not re.match(r'\\[\\d{1,2}\\]',token)]\n return toks", "def strip_unbalanced_parens(s, parens='()'):\n start, end = parens\n if not start in s and not end in s:\n return s\n\n unbalanced = []\n unbalanced_append = unbalanced.append\n\n stack = []\n stack_append = stack.append\n stack_pop = stack.pop\n\n for i, c in enumerate(s):\n if c == start:\n stack_append((i, c,))\n elif c == end:\n try:\n stack_pop()\n except IndexError:\n unbalanced_append((i, c,))\n\n unbalanced.extend(stack)\n pos_to_del = set([i for i, c in unbalanced])\n cleaned = [c if i not in pos_to_del else ' ' for i, c in enumerate(s)]\n return type(s)('').join(cleaned)", "def strip_brackets_and_quotes(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('(', ')'), ('[', ']'), 
('[', ']'), ('{', '}'), ('<', '>'),\n\t\t\t\t(Chars.lshell, Chars.rshell), (Chars.langle, Chars.rangle),\n\t\t\t\t('`', '`'),\n\t\t\t\t(Chars.lsq, Chars.rsq), (Chars.ldq, Chars.rdq), (\"'\", \"'\"), ('\"', '\"'),\n\t\t\t\t(Chars.ldparen, Chars.rdparen), (Chars.ldbracket, Chars.rdbracket), (Chars.ldangle, Chars.rdangle), (Chars.ldshell, Chars.rdshell)\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)", "def has_balanced_parens(string):", "def remove_punc(self, r):\n c = ''\n useless = [',', '+', '-', '*', '/', '=', ',', '.']\n for d in r:\n if d not in useless:\n c += d\n brackets = ['(', ')', '[', ']', '{', '}', '<', '>']\n d = str(c)\n c = ''\n brac_cnt = 0\n for i in d:\n if i == '(' or i == '[' or i in '{':\n brac_cnt += 1\n if i == ')' or i == ']' or i == '}':\n brac_cnt -= 1\n if i not in brackets:\n if brac_cnt <= 0:\n c += i\n return c", "def strip_brackets(text) -> str:\n if text is None:\n return \"\"\n\n if text.startswith(\"[\") and text.endswith(\"]\"):\n return text[1:len(text) - 1]\n\n return text", "def sparse_substrings(text: str):\n groups = re.findall(r\"\\D{1,}\", text)\n\n out = text\n # print(f\"{out} - Cleanup starting...\")\n for substring in groups:\n sparsed = f\" {squash(substring)} \"\n out = out.replace(substring, sparsed)\n # print(f\"{out} - (Found r: '{result}' -> '{sparsed}')\")\n\n # print(f\"{out} - after sparse \")\n return out", "def fix_errors_in_citation(citation):\n result = regex.sub(r\"\\s+\", \" \", citation)\n result = regex.sub(r\"§(?=\\d)\", \"§ \", result)\n result = regex.sub(r\",\\sbis\\s\", \" bis \", result)\n return result", "def check_matching_brackets(s, opening=\"([{\", closing=\")]}\"):\n\n stack = Stack()\n\n for item in s:\n # Stores any opening character\n if item in opening:\n stack.push(item)\n else:\n # if a closing bracket is found and we try to remove the matching opening bracket from the top of the stack\n # in case the top of the stack is not a matching bracket, the string is not properly nested\n if stack.length() < 1 or stack.pop() is not opening[closing.index(item)]:\n return 0\n\n return 0 if stack.length() > 0 else 1", "def delete_bracket(self, nid):\n if 0 < nid < len(self.__brackets):\n del self.__brackets[nid]\n return self", "def parse_tags(source):\n unmatched_count = 0\n start_pos = 0\n opened = False\n open_pos = 0\n cur_pos = 0\n\n finished = []\n segments = []\n\n for character in source:\n #scan for mismatched parenthesis:\n if character == '(':\n unmatched_count += 1\n if not opened:\n open_pos = cur_pos\n opened = True\n\n if character == ')':\n unmatched_count -= 1\n\n if opened and unmatched_count == 0:\n clean = source[start_pos:open_pos]\n clean = clean.strip()\n if clean:\n finished.extend(clean.split())\n\n segment = source[open_pos:cur_pos+1]\n #segments.append(segment)\n \n #get rid of bounding parentheses:\n pruned = segment[1:-1]\n group = pruned.split()\n finished.append(group)\n\n opened = False\n start_pos = cur_pos+1\n \n cur_pos += 1\n\n assert unmatched_count == 0\n\n if start_pos != cur_pos:\n #get anything that was left over here\n remainder = source[start_pos:cur_pos].strip()\n finished.extend(remainder.split())\n \n ## #now check on recursion:\n ## for item in segments:\n ## #get rid of bounding parentheses:\n ## pruned = item[1:-1]\n ## if recurse:\n ## results = parse_tags(pruned, recurse)\n ## finished.expand(results)\n ## else:\n ## finished.append(pruned.strip())\n \n return finished", "def get_country_codes(prices):\n # your code here\n \n#_________________# 1. 
Break the string into a list. \n prices = prices.split('$') # breaks the list into a list of elements.\n \n#_________________# 2. Manipulate the individual elements.\n\n #_________________# A. Remove integers\n# nation = prices[0], prices[1]\n length = len(prices)\n\n for nation in (prices):\n nation == prices[0:]\n print(nation)\n #_________________# B.\n \n nations = []\n for each_char in (0, prices, 2):\n if each_char in prices[0:2]:\n nation = each_char\n nations = list(nations)\n # lastitem = nations.pop()\n print(nations)", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def is_substring_enclosed_in_brackets(sub, string):\n # to get (...)\n pattern = re.compile(\"\\([^\\)]*\\)\")\n # to get: (...(...)...)\n pattern2 = re.compile(\"\\([^\\)]*\\([^\\)]*\\)[^\\)]*\\)\")\n # to get: (...(...(...)...)...)\n pattern3 = re.compile(\"\\([^\\)]*\\([^\\)]*\\([^\\)]*\\)[^\\)]*\\)[^\\)]*\\)\")\n\n string = str(string)\n search = re.findall(pattern, string)\n search2 = re.findall(pattern2, string)\n search3 = re.findall(pattern3, string)\n\n search3 = set(search + search2 + search3)\n if any(str(sub) in s for s in search3): # or any(str(sub) in s for s in search2):\n return True\n return False", "def remove_stray_braces(self, tex):\n num_lefts, num_rights = [\n tex.count(char)\n for char in \"{}\"\n ]\n if num_rights > num_lefts:\n backwards = tex[::-1].replace(\"}\", \"\", num_rights - num_lefts)\n tex = backwards[::-1]\n elif num_lefts > num_rights:\n tex = tex.replace(\"{\", \"\", num_lefts - num_rights)\n return tex", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def valid_parenthesis(string):\n if len(string) % 2:\n print(\"Invalid Parenthesis\")\n return\n stack = []\n bracket_hash = {\"}\": \"{\", \")\": \"(\", \"]\": \"[\"}\n\n for bracket in string:\n if bracket in bracket_hash:\n if stack:\n top = stack.pop()\n if bracket_hash[bracket] is not top:\n print(\"Invalid Parenthesis\")\n return\n else:\n top = \"\"\n else:\n if bracket in bracket_hash.values():\n stack.append(bracket)\n else:\n print(\"Invalid String\")\n return\n\n if not stack:\n print(\"Valid Parenthesis\")\n return", "def paren_references(article,word):\r\n all_references = ''\r\n # extract text inside parentheses containing the word\r\n pattern = r'\\(([^\\)]*\\b{}\\b.*?)\\)'.format(word)\r\n #[^5] 
will match any character except '5'\r\n matches = re.findall(pattern,article,re.IGNORECASE|re.DOTALL)\r\n if matches:\r\n all_references = '\\n'.join(matches)\r\n return all_references", "def parentheses_are_uneven(input_string):\n pcounter = 0\n for char in input_string:\n if char == '(':\n pcounter += 1\n elif char == ')':\n pcounter -= 1\n if pcounter != 0:\n return False\n else:\n return True", "def remove_non_narration_strings(transcription_row):\n sentence = transcription_row[\"text\"]\n # filter out (CAPITALIZED WORD) and \"CAPITALIZED WORD\". These are not enunciated in the voiceover, but rather\n # indicate noise/words from the original audio track that get interspersed into the voice\n # Might contain special characters\n # Update: Capitalization etc are inconsistent. But all follow the pattern \"text\" and (text). Remove these instead\n crosstalk_pattern = '\\(.*?\\)|\\\".*?\\\"'\n # crosstalk_findings = re.findall(crosstalk_pattern, sentence)\n # print(\"Crosstalk: \"+str(crosstalk_findings))\n sentence = re.sub(crosstalk_pattern, \" \", sentence)\n # filter out ' s ' ' Ss ' etc\n s_pattern = r'\\b[sS]+\\b'\n s_pattern_findings = re.findall(s_pattern, sentence)\n # if len(s_pattern_findings) > 0:\n # print(\"S-pattern: \"+str(s_pattern_findings))\n sentence = re.sub(s_pattern, \" \", sentence)\n transcription_row[\"text\"] = sentence\n return transcription_row" ]
[ "0.627086", "0.6270785", "0.605527", "0.5885382", "0.58816636", "0.5716584", "0.55261326", "0.54964125", "0.5492156", "0.5491435", "0.54455703", "0.5420037", "0.54059094", "0.5369905", "0.5256885", "0.52483344", "0.5220134", "0.51920635", "0.51877683", "0.51309067", "0.512497", "0.512404", "0.51183903", "0.51106954", "0.509883", "0.5080346", "0.50136524", "0.49949715", "0.497915", "0.49746105" ]
0.6384374
0
Generates a JSON Web Token that stores this user's ID and has an expiry date set to 60 days into the future.
def _generate_jwt_token(self): import jwt from datetime import datetime, timedelta from django.conf import settings dt = datetime.now() + timedelta(days=60) token = jwt.encode({ 'id': self.pk, 'username': self.username, 'exp': int(dt.strftime('%s')), }, settings.SECRET_KEY, algorithm='HS256') # print(token) return token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def generate_auth_token(self, expiration):\n ser = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return ser.dumps({'id': self.id}).decode('utf-8')", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def generate_token_for_user(user: User, expiration: datetime.timedelta=datetime.timedelta(days=7)):\n\n return generate_token({'id': user.id}, expiration)", "def generate_access_token(user_id, is_expired=False):\n\n iat = datetime.datetime.utcnow()\n\n return jwt.encode({\n 'sub': user_id, # Subject of this token\n 'iat': iat, # Issued at\n 'exp': iat + datetime.timedelta(hours=1) # Expired at\n if not is_expired\n else iat - datetime.timedelta(minutes=5)\n }, config.SECRET_KEY)", "def generate_token(self):\n\n try:\n # 
set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def generate_auth_token(self):\n s = Serializer(app.config['SECRET_KEY'])\n return s.dumps({'email': self.email})", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')", "def post(self):\n _purge_expired_user_tokens()\n\n request_dict = get_json_and_verify_params({\n 'description': {'type': str, 'optional': True},\n 'expiration_date': {'optional': True},\n })\n\n expiration_date = request_dict.get('expiration_date')\n if expiration_date:\n expiration_date = parse_utc_datetime(\n expiration_date, timezone=\"UTC\")\n\n return current_user.create_auth_token(request_dict.get('description'),\n expiration_date)", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n 
salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))" ]
[ "0.793682", "0.77396196", "0.77252686", "0.7569785", "0.7550483", "0.7504947", "0.74958557", "0.74798924", "0.745215", "0.7271606", "0.72704905", "0.7229112", "0.7227402", "0.71781677", "0.7081612", "0.69910264", "0.69628245", "0.69411725", "0.69014037", "0.6862004", "0.6856111", "0.68399364", "0.6796004", "0.6796004", "0.6796004", "0.67873275", "0.67445266", "0.67438114", "0.67156184", "0.67134756" ]
0.8095364
0
Validates if all ConfigurationOption names are unique in the ConfigurationOptions instance.
def configuration_options_object_processor(configuration_options): option_names = [option.name for option in configuration_options.configuration_options] for option_name in option_names: if option_names.count(option_name) > 1: raise WashError(f'Configuration option with the name {option_name} already exists. ' f'Names of configuration options in a WASH internal script must be unique.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_options(self):\r\n valid_choices = ('correct', 'partially-correct', 'incorrect')\r\n for option in self.options:\r\n choice = option['choice']\r\n if choice is None:\r\n raise ValueError('Missing required choice attribute.')\r\n elif choice not in valid_choices:\r\n raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(\r\n choice, ', '.join(valid_choices)))", "def check_options(self, options):\n return not any(not isinstance(element, str) for element in options)", "def validate(dic, option_list):\n\tfor key in dic.viewkeys():\n\t\tif key in option_list:\n\t\t\tfor option in option_list:\n\t\t\t\tif option != key:\n\t\t\t\t\tif dic[option] and dic[key]:\n\t\t\t\t\t\traise click.UsageError('Invalid option combination --%s \\\n\t\t\t\t\t\t\tcannot be used with --%s' % (option, key))\n\n\treturn True", "def check_conflicting_options(channel_options, task_recipe_options):\n double_specified_options = set(task_recipe_options.keys()).intersection(\n set(channel_options.keys())\n )\n conflicting_options = [\n optname\n for optname in double_specified_options\n if task_recipe_options[optname] != channel_options[optname]\n ]\n if conflicting_options:\n raise TaskOptionsError(\n f\"Recipe options cannot conflict: {conflicting_options}\"\n )", "def clean_and_validate_options(self):\n options = self.options\n\n id = options.get('id', None)\n assert(isinstance(id, str) or id is None)\n options['id'] = id\n\n name = options.get('name', None)\n assert(isinstance(name, str) or name is None)\n options['name'] = name\n\n version = options.get('version', None)\n assert(isinstance(version, str) or version is None)\n options['version'] = version", "def check_options(*options):\n def wrapper(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n if not options:\n raise ValueError(\n 'At least one option set is needed: '\n '{need}'.format(\n need=', '.join(unique_options.keys())\n )\n )\n check = [\n v for (k, v) in unique_options.items()\n if k in options\n ]\n if len(options) != len(check):\n diff = set(options) - set(unique_options.keys())\n raise ValueError(\n 'Invalid option set: {options}'.format(\n options=', '.join(diff)\n )\n )\n for unique in check:\n found = [\n k for k in kwargs.keys() if k in unique\n ]\n if not found:\n raise ValueError(\n 'At least one option is needed: {need}'.format(\n need=', '.join(unique)\n )\n )\n if len(found) > 1:\n raise ValueError(\n 'Only one option can be specified: '\n '{need}'.format(\n need=', '.join(unique),\n )\n )\n return fn(*args, **kwargs)\n return wrapped\n return wrapper", "def check_unique(self):\n pass", "def check_cls_choices_slugs(cls, slugs):\n for s in slugs:\n if settings.DJCAT_ITEM_SLUG_DELIMITER in s:\n raise ItemAttributeChoicesSlugNotValid(cls)\n\n if not len(set(slugs)) == len(slugs):\n raise ItemAttributeChoicesSlugsDuplicate(cls)", "def __validate_options__(cls, options):\n pass", "def configuration_option_object_processor(configuration_option):\n parameter_names = [parameter.name for parameter in configuration_option.parameters]\n for parameter_name in parameter_names:\n if parameter_names.count(parameter_name) > 1:\n raise WashError(f'Parameter with the name {parameter_name} already exists '\n f'in the configuration option {configuration_option.name}. 
'\n f'Names of parameters in a configuration option type must be unique.')\n\n if all(not parameter.required for parameter in configuration_option.parameters):\n raise WashError(f'No required parameters are specified '\n f'in the configuration option {configuration_option.name}. '\n f'Each configuration option requires at least 1 required parameter.')", "def _is_valid_platform_option(self, name: str) -> bool:\n disallowed_platform_options = self.disallow.get(self.platform, set())\n if name in disallowed_platform_options:\n return False\n\n allowed_option_names = self.default_options.keys() | self.default_platform_options.keys()\n\n return name in allowed_option_names", "def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' % name)\n used_names.add(name)", "def _is_valid_global_option(self, name: str) -> bool:\n allowed_option_names = self.default_options.keys() | PLATFORMS | {\"overrides\"}\n\n return name in allowed_option_names", "def check_key_exists(self) -> None:\n omitted_configs = self.necessary_config_names - set(self.config.keys())\n assert len(omitted_configs) == 0, omitted_configs", "def has_all_unique_users_names(value):\n names = [user.get(CONF_NAME) for user in value]\n if None in names and any(name is not None for name in names):\n raise vol.Invalid(\"user names of all users must be set if any is set\")\n if not all(name is None for name in names):\n has_unique_values(names)\n return value", "def enforce_unique_values(self):\n return self.properties.get('enforceUniqueValues', None)", "def _validate_options(self, rules, operator_name):\n values = []\n option_values = []\n for argument in rules[operator_name]:\n if isinstance(argument, dict) and argument.get(\"source\") == \"answers\":\n option_values = (\n self.questionnaire_schema.answer_id_to_option_values_map.get(\n argument[\"identifier\"]\n )\n )\n else:\n values = argument if isinstance(argument, list) else [argument]\n\n if values and option_values:\n for value in values:\n # Null values are allowed and will not exist in answer options\n if value and value not in option_values:\n self.add_error(\n self.VALUE_DOESNT_EXIST_IN_ANSWER_OPTIONS,\n value=value,\n answer_options=list(option_values),\n )", "def validate_list(self, field: str, valid_options: List[str]):\n val = getattr(self, field)\n if isinstance(val, list):\n for v in val:\n if v not in valid_options:\n raise ConfigError(f'{v} is not a valid option for {field}')\n else:\n if val not in valid_options:\n raise ConfigError(f'{val} is not a valid option for {field}')", "def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values", "def validateOptions(self):\n SubCommand.validateOptions(self)\n if not re.match('^yes$|^no$', self.options.usedbs):\n raise ConfigurationException(\"--dbs option only accepts the yes and no values (--dbs=yes or --dbs=no)\")\n self.usedbs = 1 if self.options.usedbs == 
'yes' else 0\n\n self.outdir = self.options.outdir", "def _check_prefixes(self, docstring: PetscDocStringImpl) -> None:\n for key, opts in sorted(self.items.items()):\n lopts = len(opts)\n assert lopts >= 1, f'number of options {lopts} < 1, key: {key}, items: {self.items}'\n\n if lopts == 1:\n # only 1 option, should start with '.'\n self._check_opt_starts_with(docstring, opts[0], 'Solitary', '.')\n else:\n # more than 1, should be '+', then however many '.', then last is '-'\n self._check_opt_starts_with(docstring, opts[0], 'First multi', '+')\n for opt in opts[1:-1]:\n self._check_opt_starts_with(docstring, opt, 'Multi', '.')\n self._check_opt_starts_with(docstring, opts[-1], 'Last multi', '-')\n return", "def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)", "def is_manually_set(option_name: str) -> bool:\n return get_where_defined(option_name) not in (\n ConfigOption.DEFAULT_DEFINITION,\n ConfigOption.STREAMLIT_DEFINITION,\n )", "def _check_duplicates(self):\n # check variables\n counter = Counter(self.variables())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateVariables(duplicates)\n\n # check parameters\n counter = Counter(self.parameters())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateParameters(duplicates)", "def _validate_unique_merge_col(self):\n msg = (\"Duplicate {}s were found. This is likely due to resource \"\n \"class binning, which is not supported at this time. \"\n \"Please re-run supply curve aggregation without \"\n \"resource class binning and ensure there are no duplicate \"\n \"values in {!r}. 
File: {!r}\")\n\n mc = ColNameFormatter.fmt(MERGE_COLUMN)\n for ds, cols, fp in zip([self.solar_meta, self.wind_meta],\n [self.__solar_cols, self.__wind_cols],\n [self.solar_fpath, self.wind_fpath]):\n merge_col = ds.columns[cols == mc].item()\n if not ds[merge_col].is_unique:\n e = msg.format(merge_col, merge_col, fp)\n logger.error(e)\n raise FileInputError(e)", "def validate_unique_header(self):\n valid = False\n unique_headers = set(self.headers)\n if len(unique_headers) == len(self.headers):\n valid = True\n else:\n seen_headers = set()\n duplicate_headers = set()\n for x in self.headers:\n if x in seen_headers or seen_headers.add(x):\n duplicate_headers.add(x)\n msg = f\"Duplicated header names are not allowed: {duplicate_headers}\"\n log_exception(Annotations.dev_logger, Annotations.user_logger, msg)\n self.store_validation_issue(\"error\", msg, \"format:cap:unique\")\n valid = False\n if any(\"Unnamed\" in s for s in list(unique_headers)):\n msg = \"Headers cannot contain empty values\"\n log_exception(Annotations.dev_logger, Annotations.user_logger, msg)\n self.store_validation_issue(\"error\", msg, \"format:cap:no-empty\")\n valid = False\n return valid", "def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_duplicate_names\")", "def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_duplicate_names\")", "def task_submit_check_options():\n if not (task_has_option('all') or task_has_option('collection') \\\n or task_has_option('field') or task_has_option('pattern') \\\n or task_has_option('matching') or task_has_option('recids')):\n task_set_option('without', 1)\n task_set_option('last', 1)\n return True", "def _validate_options(options):\n if not options.pythons:\n raise Exception(\"No Pythons given - see -p.\")\n for python in options.pythons:\n if not shutil.which(python):\n raise Exception(\n \"Python %(python)s not found.\" % dict(python=python))\n if not options.requirements:\n raise Exception(\"No requirements file specified - see -r.\")\n if not os.path.exists(options.requirements):\n raise Exception(\n \"Requirements file %(req)s not found.\"\n % dict(req=options.requirements))\n if options.blacklist and not os.path.exists(options.blacklist):\n raise Exception(\n \"Blacklist file %(path)s not found.\"\n % dict(path=options.blacklist))\n version_map = {}\n for map_entry in options.version_map:\n if ':' not in map_entry:\n raise Exception(\n \"Invalid version-map entry %(map_entry)s\"\n % dict(map_entry=map_entry))\n src, dst = map_entry.split(':')\n version_map.setdefault(src, set())\n version_map[src].add(dst)\n options.version_map = version_map" ]
[ "0.66978174", "0.6242515", "0.6204667", "0.61873025", "0.6138533", "0.60712093", "0.6030964", "0.598881", "0.5862973", "0.5836668", "0.5816893", "0.5737586", "0.5737399", "0.57218176", "0.570948", "0.5619496", "0.55831975", "0.55713516", "0.55708873", "0.5567247", "0.55379665", "0.5527299", "0.5522503", "0.5502638", "0.549951", "0.5488067", "0.5474865", "0.5474865", "0.5452059", "0.5444156" ]
0.7466442
0
Validates if all ConfigurationOptionParameter names are unique in the ConfigurationOption instance. Also validates if at least one required parameter is specified in the ConfigurationOption instance.
def configuration_option_object_processor(configuration_option): parameter_names = [parameter.name for parameter in configuration_option.parameters] for parameter_name in parameter_names: if parameter_names.count(parameter_name) > 1: raise WashError(f'Parameter with the name {parameter_name} already exists ' f'in the configuration option {configuration_option.name}. ' f'Names of parameters in a configuration option type must be unique.') if all(not parameter.required for parameter in configuration_option.parameters): raise WashError(f'No required parameters are specified ' f'in the configuration option {configuration_option.name}. ' f'Each configuration option requires at least 1 required parameter.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?", "def _validate_parameter_combinations(self):\n parameters = [\"type\", \"path\", \"mode\", \"default\", \"min\", \"max\"]\n parameters = {key: getattr(self, key, None) for key in parameters}\n type = parameters.pop(\"type\")\n\n # validate parameter combination\n if type in self._TYPE_COMBINATION_MAPPING:\n valid_parameters = self._TYPE_COMBINATION_MAPPING[type]\n for key, value in parameters.items():\n if key not in valid_parameters and value is not None:\n msg = \"Invalid parameter for '{}' Input, parameter '{}' should be None but got '{}'\"\n raise ValidationException(\n message=msg.format(type, key, value),\n no_personal_data_message=msg.format(\"[type]\", \"[parameter]\", \"[parameter_value]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )", "def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val", "def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logger.warning(\"Unexpected parameter {} supplied\".format(param))", "def validate_params(params, expected, opt_param=set()):\n expected = set(expected)\n opt_param = set(opt_param)\n pkeys = set(params)\n if expected - pkeys:\n raise ValueError(\"Required keys {} not in supplied parameters\"\n .format(\", \".join(expected - pkeys)))\n defined_param = expected | opt_param\n for param in params:\n if param not in defined_param:\n logging.warning(\"Unexpected parameter {} supplied\".format(param))", "def configuration_options_object_processor(configuration_options):\n option_names = [option.name for option in configuration_options.configuration_options]\n for option_name in option_names:\n if option_names.count(option_name) > 1:\n raise WashError(f'Configuration option with the name {option_name} already exists. 
'\n f'Names of configuration options in a WASH internal script must be unique.')", "def check_mandatory_options (options, mandatory_options, helpstr):\n missing_options = []\n for o in mandatory_options:\n if not getattr(options, o):\n missing_options.append(\"--\" + o)\n \n if not len(missing_options):\n return\n \n raise Exception(\"Missing mandatory parameter%s: %s.\\n\\n%s\\n\\n\" %\n (\"s\" if len(missing_options) > 1 else \"\",\n \", \".join(missing_options),\n helpstr))", "def _validate(self, **parameters):\n provided = set(parameters.keys())\n required = set([\n field.name for field in self.fields if field.required\n ])\n optional = set([\n field.name for field in self.fields if not field.required\n ])\n\n # Determine any parameter names supplied that are not valid.\n unexpected = provided - (optional | required)\n unexpected = ['\"' + item + '\"' for item in sorted(unexpected)]\n if unexpected:\n prefix = len(unexpected) > 1 and 'parameters ' or 'parameter '\n raise ValueError('Unknown ' + prefix + ', '.join(unexpected))\n\n # Determine if any required field names not supplied.\n missing = required - provided\n missing = ['\"' + item + '\"' for item in sorted(missing)]\n if missing:\n prefix = len(missing) > 1 and 'parameters ' or 'parameter '\n raise ValueError('Missing required ' + prefix + ', '.join(missing))\n\n # Ensure all parameter values are valid types.\n for value in parameters.values():\n _validate_parameter(value)", "def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)", "def validate(self):\r\n for opt in self.required:\r\n if not getattr(self, opt):\r\n print \"Error: %s is not specified.\" % opt\r\n self.optp.print_help()\r\n sys.exit(1)", "def validate_plugin_configuration(cls, plugin_configuration: \"PluginConfiguration\"):\n missing_fields = []\n configuration = plugin_configuration.configuration\n configuration = {item[\"name\"]: item[\"value\"] for item in configuration}\n if not configuration[\"instance_id\"]:\n missing_fields.append(\"Instance Id\")\n if not configuration[\"secret_key\"]:\n missing_fields.append(\"Secret Key\")\n\n if plugin_configuration.active and missing_fields:\n error_msg = (\n \"To enable a plugin, you need to provide values for the \"\n \"following fields: \"\n )\n raise ValidationError(error_msg + \", \".join(missing_fields))", "def check_mandatory_options (options, mandatory_options, help_text):\n missing_options = []\n for o in mandatory_options:\n if not getattr(options, o):\n missing_options.append(\"--\" + o)\n\n if not len(missing_options):\n return\n\n raise Exception(\"Missing mandatory parameter%s: %s.\\n\\n%s\\n\\n\" %\n (\"s\" if len(missing_options) > 1 else \"\",\n \", \".join(missing_options),\n help_text))", "def __verify_required_parameters(self, parameters, required_parameters):\n\n\t\tfor parameter in required_parameters:\n\t\t\tif False == parameters.has_key(parameter):\n\t\t\t\traise MissingParameterError(parameter)\n\n\t\treturn True", "def _check_parameters(self, ep, params):\n\n any_group_satisfied = False\n for group in ep.REQUIRED:\n if all(required_param in params for required_param in group):\n any_group_satisfied = True\n\n if not any_group_satisfied:\n raise ValueError(f\"Got parameters {params}, expected one of {ep.REQUIRED}\")\n\n for key in params:\n if key not in ep.POSSIBLE:\n raise ValueError(f\"Got {key}, expected one of {ep.POSSIBLE}\")", 
"def _validate_options(self):\r\n valid_choices = ('correct', 'partially-correct', 'incorrect')\r\n for option in self.options:\r\n choice = option['choice']\r\n if choice is None:\r\n raise ValueError('Missing required choice attribute.')\r\n elif choice not in valid_choices:\r\n raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(\r\n choice, ', '.join(valid_choices)))", "def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _check_param(in_params, req_param, opt_param=list()):\n for param in req_param:\n if param not in in_params:\n raise ValueError('{} parameter is required'.format(param))\n defined_param = set(req_param + opt_param)\n for param in in_params:\n if param not in defined_param:\n print(\n \"WARNING: received unexpected parameter {}\".format(param))", "def _validate(self):\n if not isinstance(self.parameter_schema, dict):\n raise TypeError(\"parameter_schema must be a dictionary\")\n # TODO: Settle on an input file schema and validation library\n if 'num_simulations' not in self.parameter_schema.keys():\n raise AttributeError(\"Parameter schema is missing the required 'num_simulations' key\")\n elif not isinstance(self.parameter_schema['num_simulations'], int):\n raise TypeError(\"Parameter schema 'num_simulations' must be an integer.\")\n self._create_parameter_names()\n for name in self._parameter_names:\n parameter_keys = self.parameter_schema[name].keys()\n parameter_definition = self.parameter_schema[name]\n if 'distribution' not in parameter_keys:\n raise AttributeError(f\"Parameter '{name}' does not contain the required 'distribution' key\")\n elif not isinstance(parameter_definition['distribution'], str) or \\\n not parameter_definition['distribution'].isidentifier():\n raise TypeError(f\"Parameter '{name}' distribution '{parameter_definition['distribution']}' is not a \" \\\n \"valid Python identifier\")\n else:\n for key in parameter_keys:\n if not isinstance(key, str) or not key.isidentifier():\n raise TypeError(f\"Parameter '{name}' keyword argument '{key}' is not a valid \" \\\n \"Python identifier\")\n # TODO: Raise an execption if the current parameter distributions don't match the previous_parameter_study\n self.parameter_distributions = self._generate_parameter_distributions()", "def _validate(self):\n if not isinstance(self.parameter_schema, dict):\n raise TypeError(\"parameter_schema must be a dictionary\")\n # TODO: Settle on an input file schema and validation library\n self._parameter_names = list(self.parameter_schema.keys())\n # List, sets, and tuples are the supported PyYAML iterables that will support expected behavior\n 
for name in self._parameter_names:\n if not isinstance(self.parameter_schema[name], (list, set, tuple)):\n raise TypeError(f\"Parameter '{name}' is not one of list, set, or tuple\")", "def validate_params(self, params: Dict[str, Any]) -> bool:\n dict_set_defaults(params, self.DEFAULT_PARAMS)\n\n for k in self.params:\n if k in {\"name\", \"descr\", \"cache_file\"}:\n continue\n\n if self.params[k] != params.get(k):\n return False\n\n return True", "def __validate_options__(cls, options):\n pass", "def check_options(*options):\n def wrapper(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n if not options:\n raise ValueError(\n 'At least one option set is needed: '\n '{need}'.format(\n need=', '.join(unique_options.keys())\n )\n )\n check = [\n v for (k, v) in unique_options.items()\n if k in options\n ]\n if len(options) != len(check):\n diff = set(options) - set(unique_options.keys())\n raise ValueError(\n 'Invalid option set: {options}'.format(\n options=', '.join(diff)\n )\n )\n for unique in check:\n found = [\n k for k in kwargs.keys() if k in unique\n ]\n if not found:\n raise ValueError(\n 'At least one option is needed: {need}'.format(\n need=', '.join(unique)\n )\n )\n if len(found) > 1:\n raise ValueError(\n 'Only one option can be specified: '\n '{need}'.format(\n need=', '.join(unique),\n )\n )\n return fn(*args, **kwargs)\n return wrapped\n return wrapper", "def validate(self):\n validated = True \n # Check that all parameters exist in the self.parameters dictionary\n for param_name in self._SCALAR_PARAMETERS:\n if param_name not in self.parameters:\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False \n \n for param_name in self._TABLE_PARAMETERS:\n if not all([elem for elem in self.parameters[param_name]]):\n LOG.critical('%s not found in %s', param_name, self.filename)\n validated = False\n \n return validated", "def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)", "def clean_and_validate_options(self):\n options = self.options\n\n id = options.get('id', None)\n assert(isinstance(id, str) or id is None)\n options['id'] = id\n\n name = options.get('name', None)\n assert(isinstance(name, str) or name is None)\n options['name'] = name\n\n version = options.get('version', None)\n assert(isinstance(version, str) or version is None)\n options['version'] = version", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginControlAbsorptionv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")", "def _validate_parameters(self):\n errors = []\n for key in self.PARAMETERS.keys():\n if key not in self.request_obj.data_params:\n errors.append(key)\n\n if errors:\n raise DataParsingError('Following data items are missing: {}'.format(', '.join(errors)))\n\n for key, params in self.PARAMETERS.items():\n params[0].validate_type(key, self.request_obj.data_params.get(key), params[1])", "def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))", "def params_optional(self) -> bool:\n result = True\n if self.no_params:\n # We will return False, because there are no params at all - optional or not.\n return False\n for parameter, 
parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n # We should allow you to print out the options to a YAML file and fill it out like a form.\n # So right now, it will create a long Kubernetes policy, but it will have lots of empty lists that we have to fill out. Oh well.\n if not parameter_details.default_value:\n # if not parameter.default_value and parameter.default_value != [] and parameter.default_value != \"\":\n result = False\n break\n return result" ]
[ "0.6902311", "0.6522125", "0.6513697", "0.64732", "0.64559853", "0.6451961", "0.6323427", "0.6316556", "0.6186604", "0.6156855", "0.6142861", "0.612921", "0.60664105", "0.6061574", "0.6059569", "0.60359025", "0.5982344", "0.59677446", "0.5965603", "0.59638935", "0.5913237", "0.5907323", "0.5903783", "0.58877605", "0.5851769", "0.5849003", "0.58297265", "0.58285177", "0.58262205", "0.5825766" ]
0.7193669
0
Return a marker's initial pose if it was passed in.
def initial_pose(self): return self._initial_pose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def start_pose():\n global start_pose\n while start_pose is None:\n pass\n return start_pose", "def _set_init_pose(self):\n raise NotImplementedError()", "def _set_init_pose(self):\n raise NotImplementedError()", "def _set_init_pose(self):\n raise NotImplementedError()", "def setInitialPose(self, initial_pose):\n self.initial_pose_received = False\n self.initial_pose = initial_pose\n self._setInitialPose()", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def get_pos_init(self):\n return self.pos_init", "def get_initial_point(self):\r\n return self._studio.get_initial_point()", "def current_pose():\n global current_pose\n while current_pose is None:\n pass\n return current_pose", "def update_initial_pose(self, msg):\n xy_theta = convert_pose_to_xy_and_theta(msg.pose.pose)\n self.initialize_particle_cloud(xy_theta)\n self.fix_map_to_odom_transform(msg)", "def get_goal_pose(self,pose=[0,0,0]):\n\t\treturn pose", "def current_pose_estimate(self):\n \n try:\n stamp = self._tf_listener.getLatestCommonTime(self._base_frame, self._map_frame)\n curr_pose = PoseStamped(header=Header(stamp=stamp, frame_id=self._base_frame))\n curr_pose = self._tf_listener.transformPose(self._map_frame, curr_pose)\n angles = tr.euler_from_quaternion([\n curr_pose.pose.orientation.x,\n curr_pose.pose.orientation.y,\n curr_pose.pose.orientation.z,\n curr_pose.pose.orientation.w])\n return Particle(curr_pose.pose.position.x, curr_pose.pose.position.y, angles[2],1)\n except (tf2.ExtrapolationException, tf2.LookupException, tf2.TransformException) as e:\n print(\"Robot pose estimate not ready yet: \", e.message)\n return Particle(0,0,0,1)", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def update_initial_pose(self, msg):\n xy_theta = \\\n self.transform_helper.convert_pose_to_xy_and_theta(msg.pose.pose)\n\n # TODO this should be deleted before posting\n self.transform_helper.fix_map_to_odom_transform(msg.pose.pose,\n msg.header.stamp)\n # initialize your particle filter based on the xy_theta tuple", "def get_pose(self):\n return self._model.get_pose()", "def getPose(self):\n\t\treturn self.__subs['pose'].getData()", "def set_init_pose(self):\n self.move_joints(self.init_cart_vel)\n\n return True", "def set_init_pose(self, init_pose):\n \tself.check_publishers_connection()\n \tself.move_joints(init_pose)", "def initial_position(self):\n\n if isinstance(self._initial_position, (list, tuple)):\n return self._initial_position\n if isinstance(self._initial_position, PositionAreaSampler):\n return self._initial_position.sample()\n\n return self._initial_position", "def get_front_center_pose(mike_pos, # Microphone position matrix, as elsewhere.\n center_dist=1., # meters from array center\n origin_in_front=True):\n mike_center = np.array([np.mean((mike_pos.T)[0]), np.mean((mike_pos.T)[1]),\n np.mean((mike_pos.T)[2]), 0, 0])\n dir_mat = get_dir(mike_center, mike_pos)\n mike_center = mike_center[:3]\n far_index = np.argmax(dir_mat, axis=0)[0]\n if far_index > 1:\n far2_index = np.argmax((dir_mat.T)[0][:far_index])\n else:\n far2_index = 
np.argmax((dir_mat.T)[0][(far_index+1):]) + far_index+1\n \n proj_vect = np.cross(mike_pos[far_index]-mike_center,\n mike_pos[far2_index]-mike_center)\n proj_vect = proj_vect*center_dist/la.norm(proj_vect) # Scale to desired distance\n \n # Set appropriate orientation\n if np.dot(proj_vect, mike_center) > 0 or not origin_in_front:\n proj_vect *= -1.\n \n theta = np.arctan2(-proj_vect[1], -proj_vect[0]) # Find angular direction\n phi = np.arctan2(-proj_vect[2], np.sqrt(proj_vect[0]**2 + proj_vect[1]**2))\n proj_vect += mike_center # Translate to global coordinates\n \n return np.array([proj_vect[0], proj_vect[1], proj_vect[2], theta, phi])", "def update_initial_pose(self, msg):\n xy_theta = \\\n self.tf_helper.convert_pose_to_xy_and_theta(msg.pose.pose)\n\n self.tf_helper.fix_map_to_odom_transform(msg.pose.pose,\n msg.header.stamp)\n self.tf_helper.send_last_map_to_odom_transform()\n # initialize your particle filter based on the xy_theta tuple", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index('left_hip')]\n right_hip = landmarks[self._landmark_names.index('right_hip')]\n center = (left_hip + right_hip) * 0.5\n return center", "def pose_cb(self, msg):\n self.current_pose = msg.pose", "def starting_position(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"starting_position\")", "def get_start_point_marker(self) -> PositionMarker: # pragma: no cover\n assert self.pos_marker\n return self.pos_marker.start_point_marker()", "def get_initial_pose(template_points, target_points):\n T = np.eye(4)\n\n # Your code goes here\n T[:3, 3] = np.mean(target_points, axis=0) - np.mean(template_points, axis=0)\n\n return T", "def init_joints_pose(self, init_pos):\n self.current_joint_pose =[]\n self.current_joint_pose = copy.deepcopy(init_pos)\n#\tprint(\"[current_joint_pose]:\", self.current_joint_pose, type(self.current_joint_pose))\n return self.current_joint_pose", "def get_pose(self, obs: np.array) -> Tuple[bool, Optional[Tuple[np.array, float]]]:\n detection, centers = cv2.findCirclesGrid(obs,\n patternSize=(self.width, self.height),\n flags=cv2.CALIB_CB_SYMMETRIC_GRID,\n blobDetector=self.detector)\n if detection:\n image_points = centers[:, 0, :]\n _, rotation_vector, translation_vector = cv2.solvePnP(objectPoints=self.circle_pattern,\n imagePoints=image_points,\n cameraMatrix=self.camera_matrix,\n distCoeffs=self.distortion_coefs)\n else:\n return detection, None\n\n theta = rotation_vector[1][0]\n\n x_global = translation_vector[2][0]\n y_global = translation_vector[0][0]\n z_global = translation_vector[1][0]\n\n x_but_global = x_global - self.target_distance\n y_but_global = y_global\n z_but_global = z_global\n\n x_but_robot = x_but_global * np.cos(theta)\n y_but_robot = y_but_global * np.cos(theta)\n z_but_robot = z_but_global\n\n return detection, (np.array([y_but_robot, z_but_robot, x_but_robot]), -np.rad2deg(theta))", "def getPose(self):\n return self.listener.pose" ]
[ "0.6584604", "0.6367691", "0.6337681", "0.6337681", "0.6337681", "0.6199046", "0.6134793", "0.60044897", "0.5989717", "0.5986071", "0.59107316", "0.5883016", "0.58752507", "0.58548284", "0.57934", "0.5782603", "0.57810706", "0.5780764", "0.5670274", "0.5653202", "0.56078744", "0.5604723", "0.5586568", "0.5584084", "0.5565126", "0.5493359", "0.5455966", "0.5427805", "0.5421708", "0.541259" ]
0.7495326
0
Get the interactive marker map of this marker template.
def marker_map(self): return self._marker_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_map(self):\n return self.map", "def get_map(self):\n return self.parent.controller.get_map()", "def mappable(self):\n return self._mappable.get(self._plotid, None)", "def markers (self):\n return self._markers", "def get_map(self):\n return self._locmap", "def GetMapToolbar(self):\n return self.toolbars['digitMap']", "def marker(self):\r\n return _marker_of(self.api.paramstyle)", "def MAP(self):\n return self.__map", "def get_hit_marker(self):\r\n return Marker((0, 0, 0), self._screen)", "def map(self) -> Map:\n return self._map", "def get_map(self):\n return self.get_raw_ys()", "def mapdata(self):\n return {\n 'lat': str(self.location.latitude),\n 'lng': str(self.location.longitude),\n 'options': {\n 'icon': self.species.marker.url\n },\n 'data': {\n 'href': str(self.get_absolute_url())\n }\n }", "def map( self ) :\n\n self.readMap( )\n\n return( self.__map )", "def build_I_map(self):\n raise NotImplementedError", "def Markers(cls):\n return cls._markers", "def icon(self):\n return 'mdi:map-marker-question'", "def draw(self):\n if not self._folium_map:\n self._set_folium_map()\n return self._folium_map", "def current(cls):\n ret_val = gxapi_cy.WrapEMAPTEMPLATE._current(GXContext._get_tls_geo())\n return GXEMAPTEMPLATE(ret_val)", "def marker(self):\n self.marker1 = MapMarker(lat=lat1, lon=lon1, source='green_marker.png')\n self.marker2 = MapMarker(lat=lat2, lon=lon2, source='red_marker.png')\n self.ids.mapview.add_marker(self.marker1)\n self.ids.mapview.add_marker(self.marker2)", "def map(self):\n return self.map_digis(self.group)", "def get_map(self) -> list:\n return self.map_obstacle", "def GetMarkerAlpha(self):\n return self._attalpha[\"marker\"]", "def generateMarkers(self, *args, **kwargs): \n return 'var PloneMapMarkers = [' + \\\n ''.join([\"{'type': '%s','options': { 'position': new google.maps.LatLng( %s, %s ), 'title' : '%s', 'title_' : '%s' }},\" \n % (object.markerIcon, object.latitude, object.longitude, object.Title(), object.getId()) \n for object in self.context.objectValues() \n if hasattr(object, 'latitude') and len(object.latitude) > 0 ])[:-1] \\\n + '];'", "def map():\n return render_template('map.html')", "def widget_map(self):\n return self._widget_map", "def map():\n\n return render_template(\"map.html\")", "def location_info(self) -> LocationInfoIm:\n return self._location_info", "def as_html(self):\n if not self._folium_map:\n self.draw()\n return self._inline_map(self._folium_map, self._width, self._height)", "def create_map(self):\n self.map = MapContainer(\n parent=self,\n style={\n 'top': self.margin[0],\n 'right': self.margin[1],\n 'bottom': self.margin[2],\n 'left': self.margin[3],\n 'aspect': 1.0,\n 'align': 'center',\n 'vertical-align': 'center' \n },\n map_size=self.map_size\n )\n self.add_node(self.map)", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"" ]
[ "0.6520739", "0.64780396", "0.63716984", "0.6316054", "0.6315033", "0.6121316", "0.60842246", "0.59842247", "0.5905344", "0.5881036", "0.5847196", "0.57981193", "0.5774668", "0.5716302", "0.569977", "0.56583416", "0.55513626", "0.5545483", "0.5514279", "0.5488387", "0.54816276", "0.54482913", "0.54256636", "0.5422471", "0.5409223", "0.53923887", "0.5376712", "0.537168", "0.5369357", "0.5369278" ]
0.77108824
0
Get the callback map of this marker template.
def callback_map(self): return self._callback_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def marker_map(self):\n return self._marker_map", "def get_map(self):\n return self.map", "def get_map(self):\n return self.parent.controller.get_map()", "def MAP(self):\n return self.__map", "def get_map(self):\n return self._locmap", "def get_callback(self):\n return self.callbacks[self.type]", "def get_map(self):\n return self.get_raw_ys()", "def map( self ) :\n\n self.readMap( )\n\n return( self.__map )", "def event_map(self) -> dict:\n return self._event_map", "def GetMapToolbar(self):\n return self.toolbars['digitMap']", "def widget_map(self):\n return self._widget_map", "def map(self) -> Map:\n return self._map", "def get_CallbackData(self):\n return self._output.get('CallbackData', None)", "def MetadataMap(self):\r\n return self._metadata_map", "def turbine_map(self):\n return self.flow_field.turbine_map", "def markers (self):\n return self._markers", "def get_data(self) -> Tuple[PoliciesMap, ZonesMap, LinksMap]:\n return self.policies_map, self.zones_map, self.links_map", "def _get_route_map(self):\n return self.__route_map", "def mappable(self):\n return self._mappable.get(self._plotid, None)", "def mapdata(self):\n return {\n 'lat': str(self.location.latitude),\n 'lng': str(self.location.longitude),\n 'options': {\n 'icon': self.species.marker.url\n },\n 'data': {\n 'href': str(self.get_absolute_url())\n }\n }", "def event(self):\n return self.get('callback_id')", "def get_tensor_map(self):\n return self.TENSOR_MAP", "def get_map(self) -> list:\n return self.map_obstacle", "def event_markers(self):\n return self._event_markers", "def callback(self):\n return self._callback", "def get_dict(self):\r\n return self.cmap", "def current(cls):\n ret_val = gxapi_cy.WrapEMAPTEMPLATE._current(GXContext._get_tls_geo())\n return GXEMAPTEMPLATE(ret_val)", "def mapping(self):\n return self._mapping", "def get_font_map(self): # real signature unknown; restored from __doc__\n pass", "def marker(self):\r\n return _marker_of(self.api.paramstyle)" ]
[ "0.7049451", "0.6784116", "0.6679001", "0.6338827", "0.6203012", "0.6103992", "0.6036267", "0.58860934", "0.58419704", "0.5811328", "0.571724", "0.5705887", "0.5624895", "0.54811853", "0.5466417", "0.5464931", "0.54548174", "0.5448449", "0.5445454", "0.54268634", "0.5374627", "0.5346221", "0.53278047", "0.5308043", "0.52864754", "0.52717674", "0.5241981", "0.52402514", "0.5232248", "0.5227423" ]
0.78254753
0
Get the menu handler of this marker template.
def menu_handler(self): return self._menu_handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_menu ( self, object ):\n return self.menu", "def menu(self):\n return self._menu", "def get_menu ( self, object, row ):\n return self.menu", "def GetMenu(self):\n return self._menu", "def menu(self):\n try:\n return get_template('{}/menu.html'.format(self.label))\n except TemplateDoesNotExist:\n return Template('')", "def menu(self) -> CursesMenu | None: # type: ignore[override]\n return self._menu", "def get_current_menu(self) -> Optional[Menu]:\n return self.menu_pointer", "def get_handler(self):\n return self._Handler(self)", "def getHandler(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def getMenuItem(self, event):\n return self.GetMenuBar().FindItemById(event.GetId())", "def get_menus():\n\n pass", "def _get_handler(self, name):\n\n if name not in self._handlers:\n raise CLICoreTemplateHandlerNotFoundError('Command [{name}] is not valid. '\n 'available commands: {commands}.'\n .format(name=name,\n commands=\n list(self._handlers.keys())))\n\n return self._handlers[name][0]", "def get_menu(menu_name):\n\n pass", "def getMenuOption():\n return menu_option", "def _get_template_menu_lst(self):\n if self.menulst is None:\n template_menulst = os.path.join(self.options.sourcedir, \"hake\",\n self.menulst_template)\n with open(template_menulst) as f:\n self.menulst = f.readlines()\n\n return self.menulst", "def get_handler(cls):\n if not cls.hnd:\n raise ValueError((\"You must set handler by using set_hnd() method, \"\n \"before calling get_handler() method.\"))\n return cls.hnd", "def get_app_menu(self): # real signature unknown; restored from __doc__\n pass", "def _get_menu(menu_name=None):\n if menu_name is None:\n menu_name = pipeline._menu\n\n widgets = dict((\n w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())\n menu = widgets.get(menu_name)\n return menu", "def getMenuItemID(self):\r\n return self.eventID", "def main_menu(self):\n return self.sitemap", "def get_to_main_menu(self):\n return self.__toMainMenu", "def getMenu(self, name):\n if self.__object is not None:\n return self.__object.getMenu(name)\n else:\n return None", "def contentHandler(self):\n return self.__contentHandler", "def _get_tag_handler(holder):\n return TagHandler(holder)", "def GetMenuContext(self):\n # type: () -> MenuContext\n if self._contextProvider is not None:\n return self._contextProvider.GetMenuContext()\n raise NotImplementedError('No context provider set and GetMenuContext '\n 'not reimplemented')", "def get_maya_menu():\n menuBar = [m for m in get_maya_window().children() if type(m) == QtWidgets.QMenuBar] or [None]\n return menuBar[0]", "def GetCurrentContext(self):\n # type: () -> MenuContext\n if self._contextCallback:\n return self._contextCallback()", "def file_menu(self):\n return self.GetMenu(self.FindMenu(\"File\"))", "def gettype(self):\r\n\r\n return self.__handler_type", "def get_menu_for_display(self):\n \n game = self.game\n if not game.menu:\n return\n\n menu = game.menu\n if not menu.is_visible:\n return None\n \n return menu" ]
[ "0.687998", "0.6734362", "0.66681284", "0.6660644", "0.6325513", "0.630204", "0.6267521", "0.608892", "0.60468376", "0.60233533", "0.60089546", "0.59873855", "0.5929736", "0.5915794", "0.5880168", "0.58781844", "0.5877921", "0.5829185", "0.58093506", "0.5780745", "0.5773996", "0.5658373", "0.565287", "0.5585791", "0.55634326", "0.54480594", "0.5413944", "0.538388", "0.53795713", "0.5346796" ]
0.8167384
0
Returns dictionary for CDP neighbor phone
def phone_parse(neighbor): mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] l_intf = neighbor['local_port'] intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\d.+', l_intf)[0] macreg = re.findall(r'.{4}', hostname.replace('SEP', '')) mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower() voice_vlan = 'None' software_version = neighbor[version_s].replace('.loads', '') platform = neighbor['platform'] for switchport in switchports: if switchport['interface'] == intf: for mac_addr in mac_addrs: if mac_addr['vlan'] == switchport['voice_vlan']: voice_vlan = mac_addr['vlan'] break break if platform.__contains__('Cisco IP Phone'): platform = neighbor['platform'].replace('Cisco IP Phone ', '') else: platform = neighbor['platform'] phone = { 'hostname': hostname, 'neighbor': { 'hostname': session.hostname, 'ip_address': session.ip_address, 'remote_intf': l_intf }, 'ip_address': mgmt_ip, 'mac_addr': mac_address, 'voice_vlan': voice_vlan, 'software_version': software_version, 'model': platform } self.phones.append(phone)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Neighbors(vendor):\n neighbors = {\"cisco\" : \"\", \"juniper\" : \"\", \"vyatta\" : \"\" }\n cisco_neighbors = {}\n juniper_neighbors = {}\n vyatta_neighbors = {}\n while True:\n print \"***\\t\\t%s NEIGHBORS***\" % (vendor)\n n = raw_input(\"\\t\\tNeighbor information (Press any key to continue. Press 'q' to quit): \")\n if n is not 'q':\n neighbor_id = raw_input(\"\\t\\tNeighbor ID (eg. x.x.x.x): \")\n neighbor_as = raw_input(\"\\t\\tNeighbor AS: \")\n if vendor == \"cisco\":\n cisco_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = cisco_neighbors\n elif vendor == \"juniper\":\n juniper_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = juniper_neighbors\n else:\n vyatta_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = vyatta_neighbors\n else:\n break\n return neighbors", "def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": '[email protected]', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict", "def neighbors(self, *args, **kwargs):\n return {\n 'neighbors': [\n {'ip': ip, 'port': port}\n for ip, port in self.neighbors\n ],\n }", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"NAT\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} NAT\",\n }\n return info", "def get_neighbor_info(neigh_ip, nbrhosts):\n\n vm_info = get_vm_with_ip(neigh_ip, nbrhosts)\n nbr_vm = vm_info['vm']\n nbr_intf = vm_info['port']\n\n macs = get_eos_mac(nbrhosts[nbr_vm], nbr_intf)\n\n return {'mac': macs['mac'], \"port\": nbr_intf, \"shell_intf\": macs['shell_intf'], \"vm\": nbr_vm}", "def get_experiment_phn_info():\n phone_list = ['##', 'aa', 'ae', 'ao', 'aw', 'ax', 'ay', 'bb', 'br',\n 'ch', 'dd', 'dh', 'eh', 'er', 'ey', 'ff', 'gg', 'hh', 'ih',\n 'iy', 'jh', 'kk', 'll', 'mm', 'ng', 'nn', 'ow', 'oy', 'pp',\n 'rr', 'sh', 'sp', 'ss', 'th', 'tt', 'uh', 'uw', 'vv', 'ww',\n 'yy', 'zh', 'zz']\n ph2id = {ph: i for i, ph in enumerate(phone_list)}\n id2ph = {i: ph for i, ph in enumerate(phone_list)}\n\n return phone_list, ph2id, id2ph", "def get_phone_data(page):\n phone_data = dict()\n soup = BeautifulSoup(page.content, features='html.parser')\n phone_name = soup.find('h1', class_=\"specs-phone-name-title\").text\n logger.info(f'Extract {phone_name} data from {page.url}')\n phone_data[phone_name] = dict()\n for table in soup.find('div', id='specs-list').find_all('table'):\n title = table.find('th').text\n logger.debug(f'Extract {phone_name}: {title}')\n phone_data[phone_name][title] = dict()\n for sub_table in table.find_all('tr'):\n if sub_table:\n key = sub_table.find('td', class_='ttl')\n val = sub_table.find('td', class_='nfo')\n if key and key.text != NON_BREAK_SPACE:\n val = _get_table_val(val)\n phone_data[phone_name][title][key.text.replace(NON_BREAK_SPACE, ' ')] = val\n\n else:\n if val:\n val = _get_table_val(val)\n try:\n phone_data[phone_name][title]['other'].append(val)\n except KeyError:\n phone_data[phone_name][title]['other'] = []\n phone_data[phone_name][title]['other'].append(val)\n\n return phone_data", "def dict() -> Dict[str, Pin]:", "def parse_phone(parsed_data):\n result = []\n known_values = []\n\n contacts = {'registrant_contact': [], 'administrative_contact': [], 
'technical_contact': [],\n 'domain_registrar' :[]}\n if 'registrant_contact' in parsed_data:\n contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n # parsing phone number from contact block\n\n for contact, info in contacts.items():\n if info is not None:\n d = {'type': 4, 'data': '', 'properties': {}, 'special_properties': {}, 'ref': {}}\n # properties dictionary\n owener = {'type': 11, 'owner': ''}\n location = {'type': 11, 'location': ''}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n\n for name in info:\n if \"phone_number\" in name:\n if name['phone_number'] in known_values:\n break\n for feature in name.keys():\n if feature == \"phone_number\":\n d['data'] = name['phone_number']\n known_values.append(name['phone_number'])\n if feature == \"full_name\":\n owener['owner'] = name['full_name']\n\n if feature ==\"registrar_name\":\n owener['owner'] = name['registrar_name']\n if feature == \"city_name\":\n location['location'] = name['city_name']\n # prevent from create result if phone number of contact is not available\n if d['data'] == '':\n continue\n properties_list.append(location)\n properties_list.append(owener)\n special_properties_list.append({'phone_type': '', 'type': 0})\n special_properties_list.append({'country_code': '', 'type': 0})\n special_properties_list.append({'operator': '', 'type': 0})\n special_properties_list.append({'is_valid': '', 'type': 0})\n d['special_properties'] = special_properties_list\n d['properties'] = properties_list\n result.append(d)\n return result", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def device_info(self):\n return {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n \"default_name\": self._device_name,\n \"default_model\": self._device[\"device_model\"],\n \"via_device\": (DOMAIN, self._router.unique_id),\n }", "def retrieve_data(self, device):\n CISCO_USER_MODE_LOGIN_INFO['device_type'] = 'cisco_ios'\n CISCO_USER_MODE_LOGIN_INFO['ip'] = device\n # add try catch\n device = ConnectHandler(**CISCO_USER_MODE_LOGIN_INFO)\n device.find_prompt()\n lldp_connections = device.send_command('show cdp neighbors')\n ram_usage = device.send_command('show processes memory | include Processor')\n cpu_usage = device.send_command('show processes cpu sorted | include CPU')\n errors = device.send_command('show interfaces | include CRC|Fast|Serial|Gig')\n unsed_port = device.send_command('show interfaces | include line protocol is down')\n device.disconnect()\n return lldp_connections, ram_usage, cpu_usage, errors, unsed_port", "def getcontactcongressdict(ccdump):\n d = {}\n for line in ccdump.strip().split('\\n'):\n (district, name, party, dc_office, dc_voice, district_voice, email_form, website) = line.split('\\t')\n 
dist = ''.join( (district[:2], '-', district[2:]) )\n d[dist] = email_form\n return d", "def retrieve_data(self, device):\n CISCO_USER_MODE_LOGIN_INFO['device_type'] = 'cisco_ios'\n CISCO_USER_MODE_LOGIN_INFO['ip'] = device\n # add try catch\n device = ConnectHandler(**CISCO_USER_MODE_LOGIN_INFO)\n device.find_prompt()\n lldp_connections = device.send_command('show cdp neighbors')\n ram_usage = device.send_command('show processes memory | include Processor')\n cpu_usage = device.send_command('show processes cpu sorted | include CPU')\n errors = device.send_command('show interfaces | include CRC|Fast|Serial|Gig')\n unsed_port = device.send_command('show interfaces | include line protocol is down')\n return lldp_connections, ram_usage, cpu_usage, errors, unsed_port", "def get_bgp_neighbors(self):\n\n router_id = self.device.get_bird_status()['router_id']\n\n field_map = {\n # 'local_as'\n 'asn': 'remote_as',\n 'router_id': 'remote_id',\n 'up': 'is_up',\n 'description': 'description',\n # 'uptime'\n }\n\n rv = {\n 'router_id': router_id,\n 'peers': {},\n }\n\n for peer in self.device.get_peer_status():\n if peer['protocol'] != 'BGP':\n continue\n\n addr = IPAddress(peer['address'])\n\n row = {v: peer.get(k, None) for k, v in field_map.items()}\n row['is_enabled'] = True\n row['address_family'] = {\n 'ipv{}'.format(addr.version): {\n 'received_prefixes': 0,\n 'accepted_prefixes': peer['routes_imported'],\n 'sent_prefixes': peer['routes_exported'],\n }\n }\n rv['peers'][addr] = row\n\n return rv", "def _get_port_info(self, context):\n port = {}\n data = dict()\n old_host_name = ''\n\n if context.original is not None:\n old_host_name = context.original.get('binding:host_id', '')\n\n context = context._port\n port_id = str(context.get('id', ''))\n data['device_owner'] = str(context.get('device_owner', ''))\n # don't create port \"network:floating_ip\n if data['device_owner'] == \"network:floatingip\":\n return None\n data['host_name'] = str(context.get('binding:host_id', ''))\n if len(context.get('fixed_ips', [])) > 0:\n data['subnet_id'] = str(context['fixed_ips'][0].get('subnet_id', ''))\n data['ip_address'] = str(context['fixed_ips'][0].get('ip_address', ''))\n data['device_id'] = str(context.get('device_id', ''))\n data['mac'] = str(context.get('mac_address', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['admin_state_up'] = context.get('admin_state_up', '')\n data['port_id'] = port_id\n data['tenant_id'] = str(context.get('tenant_id', ''))\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n data['field_not_in_md5'].append('old_host_name')\n data['old_host_name'] = old_host_name\n\n if data['port_id'] == '':\n LOG.error(_('Get creating port information failed'))\n return None\n\n if port_id != '':\n port[port_id] = data\n return port", "def device_info(self) -> dict:\n return {\n \"connections\": {(DOMAIN, self._unique_id)},\n \"name\": self._host,\n \"manufacturer\": \"IMAP E-Mail\",\n \"sw_version\": VERSION,\n }", "def draggableCircuitResults(self):\n returnedDictionary={}\n self.blochSpheres=self.separatedBlochSpheres()\n returnedDictionary[\"probabilities\"] = self.separatedProbabilities()\n #returnedDictionary[\"blochSpheres\"] = self.separatedBlochSpheres()\n returnedDictionary[\"diracNotation\"] = self.diracNotation()\n returnedDictionary[\"link\"] = \"\"\n returnedDictionary['chart'] = self.graph()\n try:\n returnedDictionary[\"qasm\"] = self.circuit.qasm()\n 
except Exception:\n #str(Exception)\n returnedDictionary[\"qasm\"] = \"//You are using custom gate\\n//with size more than 2 qubits\\n//sorry, this version doesn't support that\\n//qiskit version 0.19.1\"\n \n if self.API_TOKEN != \"\":\n returnedDictionary[\"link\"] = self.runOnIBMQ()\n \n return returnedDictionary", "def device_info(self):\n if self._mac:\n mac = {(CONNECTION_NETWORK_MAC, self._mac)}\n else:\n mac = {}\n\n device_info = {\n ATTR_IDENTIFIERS: {(DOMAIN, self._item_id)},\n ATTR_NAME: self._name,\n ATTR_CONNECTIONS: mac,\n ATTR_MANUFACTURER: \"Google\",\n ATTR_MODEL: DEV_CLIENT_MODEL,\n \"via_device\": (DOMAIN, self._system_id),\n }\n\n return device_info", "def get_moore_neighbor_info(self, i, j, cell_info) -> dict:\n neighbor_info = []\n for a in range(-1, 2):\n for b in range(-1, 2):\n if not (a == b == 0): \n neighbor_info.append(cell_info[i + a][j + b])\n return neighbor_info", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def CiscoLoopback():\n cisco = {}\n print \"***Cisco router configuration***\"\n as_number = raw_input(\"\\tInsert BGP AS number: \")\n while True:\n cisco_loopback = {}\n loopback_name = raw_input(\"\\tInsert loopback name (Press 'q' to quit): \")\n if loopback_name is not 'q':\n loopback_address = raw_input(\"\\tLoopback address (eg. x.x.x.x): \")\n loopback_network = raw_input(\"\\tLoopback network (eg. x.x.x.0): \")\n loopback_mask = raw_input(\"\\tLoopback mask (eg. 255.255.255.0): \")\n cisco_loopback[\"address\"] = loopback_address\n cisco_loopback[\"network\"] = loopback_network\n cisco_loopback[\"mask\"] = loopback_mask\n cisco[loopback_name] = cisco_loopback\n elif loopback_name == 'q':\n break\n else:\n print \"You just typed something incorrect!\"\n return (cisco, as_number)", "def other_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n if software_version.__contains__(','):\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1].split('REL')[0]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n elif software_version.__contains__('Version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('Version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n elif software_version.__contains__('version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n platform = neighbor['platform']\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n other = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': 
neighbor['local_port'],\n 'local_intf': neighbor['remote_port']\n },\n 'software_version': software_version,\n 'model': platform\n }\n self.others.append(other)", "def emulation_bgp_info(self, *args, **kwargs):\n if 'neighbour_keys' in kwargs:\n is_neighbour_handler = True\n neighbour_key_list = kwargs.pop('neighbour_keys')\n\n the_port = None\n for port in args:\n self.bgp_dict[port][\"info\"] = {}\n self.bgp_dict[port][\"bgp_info\"] = {}\n if is_neighbour_handler:\n key_list = neighbour_key_list[args.index(port)]\n else:\n key_list = list(self.bgp_dict[port][\"n_handler\"].keys())\n\n # create bgp info dictionary:\n for key in key_list:\n n_handle = self.bgp_dict[port]['n_handler'][key]\n self.bgp_dict[port][\"info\"][n_handle] = {}\n cfg_name = \"bgp_info_{0}\".format(n_handle.replace(\"bgp_neighbour_\", \"\"))\n self.ixia.puts(\"${0}\".format(n_handle))\n kwargs[\"handle\"] = \"${0}\".format(n_handle)\n\n self.ixia.ixia_emulation_bgp_info(**kwargs)\n assert self.ixia.check_return_code() == \"\"\n self.ixia.set_var(**{cfg_name: \"$return_code\"})\n self.bgp_dict[port][\"bgp_info\"][key] = cfg_name\n self.ixia.puts(\"$return_code\")\n\n # create list of info objects keys:\n _rlist = self.ixia.tcl(\"keylkeys {0}\".format(cfg_name))\n _rlist = _rlist.split(\" \")\n for key_item in _rlist:\n self.bgp_dict[port][\"info\"][n_handle][key_item] = self.ixia.tcl(\"keylget {0} {1}\".format(cfg_name, key_item))\n\n the_port = self.bgp_dict[port]['info']\n\n if the_port is not None:\n return copy.deepcopy(the_port)", "def device_info(self) -> dict[str, any]:\n device_information = {\n \"identifiers\": {(DOMAIN, self._dev_id)},\n \"name\": self._device_name,\n \"manufacturer\": self._manufacturer,\n \"model\": self._model,\n \"sw_version\": self._fw_version,\n }\n\n if self._dev_id != self._api.gateway_id:\n device_information[\"via_device\"] = (DOMAIN, self._api.gateway_id)\n else:\n device_information[\"name\"] = f\"Smile {self._api.smile_name}\"\n\n return device_information", "def getPCAdress(self) -> ghidra.program.model.address.Address:\n ...", "def wap_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n platform = neighbor['platform']\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n ap = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'model': platform,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': neighbor['local_port']\n },\n 'software_version': software_version\n }\n self.waps.append(ap)", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is 
status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def run(self, params={}) -> dict:\n\n additional_params = {\n \"phone\": params.get(Input.PHONE),\n \"strictness\": params.get(Input.STRICTNESS),\n \"country\": params.get(Input.COUNTRY, \"\"),\n }\n\n self.logger.info(f\"[ACTION LOG] Getting information for Phone: {params.get(Input.PHONE)} \\n\")\n response = self.connection.ipqs_client.ipqs_lookup(PHONE_ENDPOINT, additional_params)\n\n return {\n Output.ACTIVE: response.get(\"active\") or False,\n Output.ACTIVE_STATUS: response.get(\"active_status\") or \"N/A\",\n Output.CARRIER: response.get(\"carrier\") or \"N/A\",\n Output.CITY: response.get(\"city\") or \"N/A\",\n Output.COUNTRY: response.get(\"country\") or \"N/A\",\n Output.DIALING_CODE: response.get(\"dialing_code\") or 0,\n Output.DO_NOT_CALL: response.get(\"do_not_call\") or False,\n Output.FORMATTED: response.get(\"formatted\") or \"null\",\n Output.FRAUD_SCORE: response.get(\"fraud_score\") or 0,\n Output.LEAKED: response.get(\"leaked\") or False,\n Output.LINE_TYPE: response.get(\"line_type\") or \"Unknown\",\n Output.LOCAL_FORMAT: response.get(\"local_format\") or \"null\",\n Output.NAME: response.get(\"name\") or \"N/A\",\n Output.PREPAID: response.get(\"prepaid\") or False,\n Output.RECENT_ABUSE: response.get(\"recent_abuse\") or False,\n Output.REGION: response.get(\"region\") or \"N/A\",\n Output.RISKY: response.get(\"risky\") or False,\n Output.TIMEZONE: response.get(\"timezone\") or \"N/A\",\n Output.VALID: response.get(\"valid\") or False,\n Output.VOIP: response.get(\"VOIP\") or False,\n Output.ZIP_CODE: response.get(\"zip_code\") or \"N/A\",\n }", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A" ]
[ "0.6110668", "0.6060373", "0.59975594", "0.58959544", "0.5839219", "0.57849395", "0.56733996", "0.5624407", "0.56165946", "0.5539645", "0.546498", "0.5460354", "0.5419926", "0.54115295", "0.53951436", "0.5389832", "0.5381396", "0.5365169", "0.53650045", "0.5330386", "0.5319647", "0.53182447", "0.5312783", "0.5301429", "0.52443033", "0.52397573", "0.5216572", "0.5209584", "0.52084595", "0.520631" ]
0.6916648
0
Returns dictionary for CDP neighbor router or switch
def router_sw_parse(neighbor): mgmt_ip = neighbor[mgmt_ip_s] hostname = neighbor[hostname_s].split('.')[0] if hostname.__contains__('('): hostname = hostname.split('(')[0] if nxos: sysname = neighbor['sysname'] if sysname != '': hostname = sysname if mgmt_ip == '': mgmt_ip = neighbor['interface_ip'] software_version = neighbor[version_s] platform = neighbor['platform'] for software in software_version.split(','): if software.__contains__('Version'): software_version = software.split('Version')[1].split('REL')[0] if software_version.__contains__(':'): software_version = software_version.replace(': ', '') else: software_version = software_version.replace(' ', '') break if platform.__contains__('cisco '): platform = neighbor['platform'].replace('cisco ', '') elif platform.__contains__('Cisco '): platform = neighbor['platform'].replace('Cisco ', '') else: platform = neighbor['platform'] router_sw = { 'hostname': hostname, 'ip_address': mgmt_ip, 'remote_intf': neighbor['local_port'], 'local_intf': neighbor['remote_port'], 'software_version': software_version, 'model': platform } self.routers_switches.append(router_sw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Neighbors(vendor):\n neighbors = {\"cisco\" : \"\", \"juniper\" : \"\", \"vyatta\" : \"\" }\n cisco_neighbors = {}\n juniper_neighbors = {}\n vyatta_neighbors = {}\n while True:\n print \"***\\t\\t%s NEIGHBORS***\" % (vendor)\n n = raw_input(\"\\t\\tNeighbor information (Press any key to continue. Press 'q' to quit): \")\n if n is not 'q':\n neighbor_id = raw_input(\"\\t\\tNeighbor ID (eg. x.x.x.x): \")\n neighbor_as = raw_input(\"\\t\\tNeighbor AS: \")\n if vendor == \"cisco\":\n cisco_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = cisco_neighbors\n elif vendor == \"juniper\":\n juniper_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = juniper_neighbors\n else:\n vyatta_neighbors[neighbor_id] = neighbor_as\n neighbors[vendor] = vyatta_neighbors\n else:\n break\n return neighbors", "def get_neighbor_info(neigh_ip, nbrhosts):\n\n vm_info = get_vm_with_ip(neigh_ip, nbrhosts)\n nbr_vm = vm_info['vm']\n nbr_intf = vm_info['port']\n\n macs = get_eos_mac(nbrhosts[nbr_vm], nbr_intf)\n\n return {'mac': macs['mac'], \"port\": nbr_intf, \"shell_intf\": macs['shell_intf'], \"vm\": nbr_vm}", "def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))", "def neighbors(self, *args, **kwargs):\n return {\n 'neighbors': [\n {'ip': ip, 'port': port}\n for ip, port in self.neighbors\n ],\n }", "def getTopoConf(self):\n\n topoDict = {}\n topoDict['hosts'] = set()\n topoDict['switches'] = set()\n topoDict['links'] = list()\n # linkID -> [linkStr]\n # Example: L = {'l1': 's1-s2'}\n topoDict['L'] = {}\n\n confDict = self.configSectionMap(\"Topology\")\n if not confDict:\n return {}\n\n linkStr = confDict['links']\n pairs = parseConfStr(linkStr)\n # 'pairs' would be a list of tuples (linkID, switch1, switch2).\n for pair in pairs:\n nodes = (pair[1], pair[2])\n topoDict['links'].append(nodes)\n if nodes[0].startswith('s') and nodes[1].startswith('s'):\n topoDict['L']['l' + pair[0]] = nodes[0] + '-' + nodes[1]\n for node in nodes:\n if node.startswith('h') and node not in topoDict:\n topoDict[node] = {}\n topoDict['hosts'].add(node)\n if node.startswith('s'):\n topoDict['switches'].add(node)\n\n # Obtain IP address information from the config file.\n # Set MAC addresses automatically and sequentially.\n baseAddr = confDict['base_addr'].strip()\n subnetAddr = confDict['subnet_addr'].strip()\n if subnetAddr == 'x':\n subnetAddr = None\n hostAddr = confDict['host_addr'].strip()\n if hostAddr == 'x':\n hostAddr = None\n\n # Check that one of the subnetAddr and hostAddr was 'x'.\n if subnetAddr and hostAddr:\n info(\"**** [G2]: invalid config for subnet or host address; please make sure that either subnet or host address is 'x'; exiting...\\n\")\n return {}\n\n netmaskLen = int(confDict['netmask_length'].strip())\n if netmaskLen == 0:\n netmaskLen = None\n\n assignedIPs = set()\n for hn in topoDict['hosts']:\n num = hn[1:]\n if not subnetAddr:\n currIP = generateIPAddress(baseAddr,num,hostAddr,netmaskLen)\n topoDict[hn]['IP'] = currIP\n assignedIPs.add(currIP)\n if not hostAddr:\n currIP = generateIPAddress(baseAddr,subnetAddr,num,netmaskLen)\n topoDict[hn]['IP'] = currIP\n assignedIPs.add(currIP)\n\n topoDict[hn]['MAC'] = dpid_to_mac(int(num))\n\n # IF 'override_ip' configuration was set, we read the IP addresses that are specified under 'ip_info' config parameter.\n # For the hosts present in the 'ip_info' config, we set the IP to user-specified value.\n overrideIP = confDict['override_ip'].strip()\n if 
overrideIP == 'yes':\n overrideIPStr = confDict['ip_info'].strip()\n pairs = parseConfStr(overrideIPStr)\n for (hName, hIP) in pairs:\n if hIP in assignedIPs:\n info(\"**** [G2]: override IPs conflict with auto-assigned IPs; exiting....\\n\")\n return {}\n topoDict[hName]['IP'] = hIP\n\n topoDict['flowSpec'] = confDict['flow_paths_file'].strip()\n topoDict['defaultLinkInfo'] = self.parseDefaultLinkInfo(confDict['default_link_info'])\n topoDict['linkInfos'] = self.parseLinkInfoData(confDict['link_info'])\n topoDict['topoJSON'] = os.path.join(self.outPath, confDict['topology_json_outfile'])\n\n return topoDict", "def get_bgp_neighbors(self):\n\n router_id = self.device.get_bird_status()['router_id']\n\n field_map = {\n # 'local_as'\n 'asn': 'remote_as',\n 'router_id': 'remote_id',\n 'up': 'is_up',\n 'description': 'description',\n # 'uptime'\n }\n\n rv = {\n 'router_id': router_id,\n 'peers': {},\n }\n\n for peer in self.device.get_peer_status():\n if peer['protocol'] != 'BGP':\n continue\n\n addr = IPAddress(peer['address'])\n\n row = {v: peer.get(k, None) for k, v in field_map.items()}\n row['is_enabled'] = True\n row['address_family'] = {\n 'ipv{}'.format(addr.version): {\n 'received_prefixes': 0,\n 'accepted_prefixes': peer['routes_imported'],\n 'sent_prefixes': peer['routes_exported'],\n }\n }\n rv['peers'][addr] = row\n\n return rv", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"NAT\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} NAT\",\n }\n return info", "def ip4_route(node):\n output = normalize_text(node.run('ip route')).splitlines()\n result = {}\n for line in output:\n columns = line.split(' ')\n route = result[columns[0]] = {}\n prev = None\n for column in columns:\n if prev == 'dev':\n route['dev'] = column\n if prev == 'via':\n route['via'] = column\n if prev == 'proto':\n route['proto'] = column\n if prev == 'metric':\n route['metric'] = column\n if prev == 'scope':\n route['scope'] = column\n prev = column\n\n return result", "def convert_lsdb_to_neighbor_info(lsdb) :\n \n neidb = []\n nei_dict = {}\n\n # trace router lsa, link type 1 and 4\n for lsa_id, lsa in lsdb.rdb.items() :\n\n rtr = { \"router_id\": lsa_id, \"neighbors\": []}\n neidb.append(rtr)\n nei_dict[lsa_id] = rtr\n\n for rlink in lsa.attached_links :\n if rlink.link_type == P2P_LINK :\n rtr[\"neighbors\"].append({\"router_id\": rlink.link_id,\n \"type\": \"p2p\"})\n\n if rlink.link_type == VIRTUAL_LINK :\n rtr[\"neighbors\"].append({\"router_id\": rlink.link_id,\n \"type\": \"vlink\"})\n\n\n # trace network lsa. 
in network lsa, attached routers must establish\n # neighbor each other (full mesh).\n for lsa_id, lsa in lsdb.ndb.items() :\n\n for src in lsa.attached_routers :\n for dst in lsa.attached_routers :\n if src == dst : continue\n nei_dict[src][\"neighbors\"].append({\"router_id\": dst,\n \"type\": \"network\"})\n\n # sort\n for rtr in neidb :\n rtr[\"neighbors\"].sort(key = lambda nei: inet_itok(nei[\"router_id\"]))\n neidb.sort(key = lambda rtr: inet_itok(rtr[\"router_id\"]))\n\n return neidb", "def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):\r\n network = {}\r\n macs = self.get('%s_mac' % kind)\r\n network['mac_addresses'] = macs\r\n\r\n if len(macs) == 0:\r\n return network\r\n\r\n if router:\r\n network['router'] = self.get('router', macs[0])\r\n\r\n if vlans:\r\n network['vlans'] = self.get('vlans', macs[0])\r\n\r\n if vlan_ids:\r\n network['vlan_ids'] = self.get('vlan_ids', macs[0])\r\n\r\n return network", "def get_network_info_dict(network):\n info_str = nx.info(network)\n lines = info_str.split('\\n')\n\n info_dict = {}\n for line in lines:\n pair = line.split(':')\n info_dict[pair[0]] = pair[1].strip()\n\n return info_dict", "def ip6_route(node):\n output = normalize_text(node.run('ip -6 route')).splitlines()\n result = {}\n for line in output:\n columns = line.split(' ')\n route = result[columns[0]] = {}\n prev = None\n for column in columns:\n if prev == 'dev':\n route['dev'] = column\n if prev == 'via':\n route['via'] = column\n if prev == 'proto':\n route['proto'] = column\n if prev == 'metric':\n route['metric'] = column\n if prev == 'pref':\n route['pref'] = column\n prev = column\n\n return result", "def __get_network_routes(self):\n routes = []\n\n gws = netifaces.gateways()\n for k in gws.keys():\n if k == 'default':\n continue\n\n\t for r in gws[k]:\n (ip,interface,is_gateway) = r\n\n gw_name = \"{0}\".format(netifaces.address_families[k])\n\n routes.append({\n gw_name : {\n 'ip_address' : ip,\n 'interface' : interface,\n\t\t\t 'default' : is_gateway\n }\n \n }\n )\n\n return routes", "def CiscoLoopback():\n cisco = {}\n print \"***Cisco router configuration***\"\n as_number = raw_input(\"\\tInsert BGP AS number: \")\n while True:\n cisco_loopback = {}\n loopback_name = raw_input(\"\\tInsert loopback name (Press 'q' to quit): \")\n if loopback_name is not 'q':\n loopback_address = raw_input(\"\\tLoopback address (eg. x.x.x.x): \")\n loopback_network = raw_input(\"\\tLoopback network (eg. x.x.x.0): \")\n loopback_mask = raw_input(\"\\tLoopback mask (eg. 
255.255.255.0): \")\n cisco_loopback[\"address\"] = loopback_address\n cisco_loopback[\"network\"] = loopback_network\n cisco_loopback[\"mask\"] = loopback_mask\n cisco[loopback_name] = cisco_loopback\n elif loopback_name == 'q':\n break\n else:\n print \"You just typed something incorrect!\"\n return (cisco, as_number)", "def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))", "def device_info(self):\n return {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n \"default_name\": self._device_name,\n \"default_model\": self._device[\"device_model\"],\n \"via_device\": (DOMAIN, self._router.unique_id),\n }", "def identify_remote_router(remote_address):\n global DATA\n port = remote_address[1]\n for every_router in DATA[\"neighbor\"]:\n if every_router[2] is port:\n return every_router[0]", "def get_all(cls, session, parent_bgp_router):\n logging.info(\"Retrieving all %s data from switch\", cls.__name__)\n\n uri = \"{0}/{1}/bgp_neighbors\".format(\n parent_bgp_router.base_uri, parent_bgp_router.asn\n )\n\n try:\n response = session.request(\"GET\", uri)\n except Exception as e:\n raise ResponseError(\"GET\", e)\n\n if not utils._response_ok(response, \"GET\"):\n raise GenericOperationError(response.text, response.status_code)\n\n data = json.loads(response.text)\n\n bgp_dict = {}\n # Get all URI elements in the form of a list\n uri_list = session.api.get_uri_from_data(data)\n\n for uri in uri_list:\n # Create a BgpNeighbor object\n ip_or_ifname_or_group_name, bgp_neighbor = BgpNeighbor.from_uri(\n session, parent_bgp_router, uri\n )\n # Load all BGP Neighbor data from within the Switch\n bgp_neighbor.get()\n bgp_dict[ip_or_ifname_or_group_name] = bgp_neighbor\n\n return bgp_dict", "def wap_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n platform = neighbor['platform']\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n ap = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'model': platform,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': neighbor['local_port']\n },\n 'software_version': software_version\n }\n self.waps.append(ap)", "def other_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n if software_version.__contains__(','):\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1].split('REL')[0]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = 
software_version.replace(' ', '')\n break\n elif software_version.__contains__('Version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('Version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n elif software_version.__contains__('version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n platform = neighbor['platform']\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n other = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': neighbor['local_port'],\n 'local_intf': neighbor['remote_port']\n },\n 'software_version': software_version,\n 'model': platform\n }\n self.others.append(other)", "def emulation_bgp_info(self, *args, **kwargs):\n if 'neighbour_keys' in kwargs:\n is_neighbour_handler = True\n neighbour_key_list = kwargs.pop('neighbour_keys')\n\n the_port = None\n for port in args:\n self.bgp_dict[port][\"info\"] = {}\n self.bgp_dict[port][\"bgp_info\"] = {}\n if is_neighbour_handler:\n key_list = neighbour_key_list[args.index(port)]\n else:\n key_list = list(self.bgp_dict[port][\"n_handler\"].keys())\n\n # create bgp info dictionary:\n for key in key_list:\n n_handle = self.bgp_dict[port]['n_handler'][key]\n self.bgp_dict[port][\"info\"][n_handle] = {}\n cfg_name = \"bgp_info_{0}\".format(n_handle.replace(\"bgp_neighbour_\", \"\"))\n self.ixia.puts(\"${0}\".format(n_handle))\n kwargs[\"handle\"] = \"${0}\".format(n_handle)\n\n self.ixia.ixia_emulation_bgp_info(**kwargs)\n assert self.ixia.check_return_code() == \"\"\n self.ixia.set_var(**{cfg_name: \"$return_code\"})\n self.bgp_dict[port][\"bgp_info\"][key] = cfg_name\n self.ixia.puts(\"$return_code\")\n\n # create list of info objects keys:\n _rlist = self.ixia.tcl(\"keylkeys {0}\".format(cfg_name))\n _rlist = _rlist.split(\" \")\n for key_item in _rlist:\n self.bgp_dict[port][\"info\"][n_handle][key_item] = self.ixia.tcl(\"keylget {0} {1}\".format(cfg_name, key_item))\n\n the_port = self.bgp_dict[port]['info']\n\n if the_port is not None:\n return copy.deepcopy(the_port)", "def kitero():\n return dict(hostname=hostname)", "def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict", "def get_socket_dictionary(self) -> dict:\n socket_dictionary = {\n \"action\": self.action,\n \"car_id\": self.car_id,\n \"username\": self.username,\n \"password\": self.password,\n \"usertoken\": self.usertoken,\n 
\"info_date_time\": self.info_date_time,\n \"current_location\": self.current_location,\n \"engineer_bluetooth\": self.engineer_bluetooth,\n \"engineer_code\": self.engineer_code\n }\n return socket_dictionary", "def draggableCircuitResults(self):\n returnedDictionary={}\n self.blochSpheres=self.separatedBlochSpheres()\n returnedDictionary[\"probabilities\"] = self.separatedProbabilities()\n #returnedDictionary[\"blochSpheres\"] = self.separatedBlochSpheres()\n returnedDictionary[\"diracNotation\"] = self.diracNotation()\n returnedDictionary[\"link\"] = \"\"\n returnedDictionary['chart'] = self.graph()\n try:\n returnedDictionary[\"qasm\"] = self.circuit.qasm()\n except Exception:\n #str(Exception)\n returnedDictionary[\"qasm\"] = \"//You are using custom gate\\n//with size more than 2 qubits\\n//sorry, this version doesn't support that\\n//qiskit version 0.19.1\"\n \n if self.API_TOKEN != \"\":\n returnedDictionary[\"link\"] = self.runOnIBMQ()\n \n return returnedDictionary", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def get_gateway(self, node):\n router = None\n\n for ip, n in self._nodes_dict.items():\n\n if n is node:\n continue\n\n if n.is_gateway:\n return ip\n\n if not router and n.is_router:\n router = ip\n\n return router", "def networks(self) -> dict:\n return self.data[\"networks\"]", "def get_lldp_neighbors(self):\n lldp = {}\n neighbors_detail = self.get_lldp_neighbors_detail()\n for interface, entries in neighbors_detail.items():\n lldp[interface] = []\n for lldp_entry in entries:\n hostname = lldp_entry[\"remote_system_name\"]\n if not hostname:\n hostname = lldp_entry[\"remote_chassis_id\"]\n lldp[interface].append({\n \"port\": lldp_entry[\"remote_port\"],\n \"hostname\": hostname\n })\n\n return lldp", "def phone_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n l_intf = neighbor['local_port']\n intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\\d.+', l_intf)[0]\n macreg = re.findall(r'.{4}', hostname.replace('SEP', ''))\n mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower()\n voice_vlan = 'None'\n software_version = neighbor[version_s].replace('.loads', '')\n platform = neighbor['platform']\n for switchport in switchports:\n if switchport['interface'] == intf:\n for mac_addr in mac_addrs:\n if mac_addr['vlan'] == switchport['voice_vlan']:\n voice_vlan = mac_addr['vlan']\n break\n break\n if platform.__contains__('Cisco IP Phone'):\n platform = neighbor['platform'].replace('Cisco IP Phone ', '')\n else:\n platform = neighbor['platform']\n phone = {\n 'hostname': hostname,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': l_intf\n },\n 'ip_address': mgmt_ip,\n 'mac_addr': mac_address,\n 'voice_vlan': voice_vlan,\n 'software_version': software_version,\n 'model': platform\n }\n self.phones.append(phone)" ]
[ "0.63590497", "0.6126241", "0.61201745", "0.60082996", "0.5985307", "0.59344363", "0.5915709", "0.586581", "0.5772417", "0.5727852", "0.57097715", "0.5701715", "0.569434", "0.5683343", "0.564236", "0.56369084", "0.56235886", "0.5598898", "0.55868256", "0.5574442", "0.5525679", "0.55181444", "0.5499835", "0.5489069", "0.5459118", "0.54451", "0.5436913", "0.5420169", "0.5411269", "0.5400971" ]
0.6161124
1