query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes)
---|---|---|---|---|---|---|
Merge two volumes into one. | def merge_volumes(volume1, volume2):
if not isinstance(volume1, Volume) or \
not isinstance(volume2, Volume):
raise TypeError()
lowercorner1, uppercorner1 = volume1.get_corners()
lowercorner2, uppercorner2 = volume2.get_corners()
lowercorner = (min(lowercorner1[0], lowercorner2[0]),
min(lowercorner1[1], lowercorner2[1]),
min(lowercorner1[2], lowercorner2[2]))
uppercorner = (max(uppercorner1[0], uppercorner2[0]),
max(uppercorner1[1], uppercorner2[1]),
max(uppercorner1[2], uppercorner2[2]))
return Volume(None, lowercorner, uppercorner) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(): #Status: WIP\r\n pass",
"def sync(self, **kwargs):\n volume_1 = kwargs['NAMES'][0]\n volume_2 = kwargs['NAMES'][1]\n path1 = f\"{self.cm.find_name(name=volume_1)[0]['path']}/{volume_1}/\"\n path2 = f\"{self.cm.find_name(name=volume_2)[0]['path']}/{volume_2}/\"\n os.system(f\"rsync -avzh {path2} {path1}\")\n kwargs1 = {'NAME': volume_1, 'key': \"sync_with\", 'value': volume_2}\n volume_info1 = self.add_tag(**kwargs1)\n result = [volume_info1]\n return result",
"def svn_fs_merge(*args):\r\n return _fs.svn_fs_merge(*args)",
"def merge_snapshot(self):\n disks = self.get_disks()\n disk_files_tree = []\n for disk in disks:\n disk_files_tree += (DiskImageHelper.get_backing_files_tree(disk.file))\n merge_snapshot_cmd = \"virsh blockpull --domain {domain_name} {disk_path} --wait\".format(\n domain_name=self.name, disk_path=disk.file)\n\n logging.debug(\"Executing: '%s'\" % merge_snapshot_cmd)\n logging.info(\"Merging base to new snapshot for '%s' device\" % disk.device)\n\n # launch command\n merge_snapshot_cmds = shlex.split(merge_snapshot_cmd)\n merge_snapshot = subprocess.Popen(merge_snapshot_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=False)\n\n # wait to terminate\n status = merge_snapshot.wait()\n\n if status != 0:\n logging.error(\"Error for '%s': %s\" % (merge_snapshot_cmd, merge_snapshot.stderr.read()))\n logging.critical(\"{exe} returned {status} state\".format(exe=merge_snapshot_cmds[0], status=status))\n raise Exception(\"blockpull didn't work properly\")\n\n current_disk_files = [disk.file for disk in self.get_disks()]\n\n # remove old disk device files without current ones\n for file in [disk_file_tree for disk_file_tree in disk_files_tree if disk_file_tree not in current_disk_files]:\n logging.info(\"Removing old disk file: '%s'\" % file)\n os.remove(file)",
"def merge_clouds(commande):\n commande+=\" -merge_clouds -save_clouds\"\n subprocess.call(commande)\n return True",
"def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)",
"def apply_merge(volume, volumes, merge_directions):\n \n def get_new_volume(volume, lowcorner):\n v2 = get_volume(lowcorner)\n if v2 != None:\n return merge_volumes(volume, v2)\n else:\n return volume\n\n def get_volume(lowcorner):\n if not isinstance(lowcorner, tuple):\n raise TypeError() # required for \"==\"\n\n for i in range(len(volumes)):\n v = volumes[i]\n if v.p1 == lowcorner:\n logger.debug(\"\\tMerging volume with low corner %s\", v.p1)\n return volumes.pop(i)\n \n logger.warning(\"\\tNo volume to merge with\")\n return None\n\n import copy\n\n logger.debug(\"\\t== Function == apply_merge\")\n\n p1, p2 = volume.get_corners()\n logger.debug(\"\\tTargetting volume with low corner %s\", p1)\n\n if len(merge_directions) == 1:\n if Axes.k in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.k.value] = p2[Axes.k.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.j in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.j.value] = p2[Axes.j.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.i in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.i.value] = p2[Axes.i.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif len(merge_directions) == 2:\n logger.debug(\"\\tMerge directions: %s\", merge_directions)\n axis1, axis2 = merge_directions\n\n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])\n new_volume_axis2 = apply_merge(volume, volumes, [axis2])\n new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)\n\n elif len(merge_directions) == 3:\n logger.debug(\"\\tMerge directions %s\", merge_directions)\n axis1, axis2, axis3 = merge_directions\n \n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_vol1 = apply_merge(volume, volumes, [axis2, axis3])\n new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])\n new_volume = merge_volumes(new_vol1, new_vol2)\n\n else:\n raise ValueError()\n\n logger.debug(\"\\tEnd\")\n return new_volume",
"def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat",
"def merge():\n click.echo(\"Not implemented yet. In the future, this command will be used for merging models.\")\n sys.exit(-2)",
"def merge_fs(fs1,fs2):\n # This function merges fs2 into fs1, changing fs1 in-place\n # It's a cheaper and faster alternative of unify(), which will check\n # all the similarities and differences between fs1 and fs2. But this one\n # just assumes that fs2 and fs1 does not have any entries in common\n # NOTICE: In Templates.lex we cannot guarantee there is no overlap\n # so only use this function when it IS clear.\n for k in fs2.keys():\n if fs1.has_key(k):\n merge_fs(fs1[k],fs2[k])\n else:\n fs1[k] = fs2[k]\n return",
"def variant_add(v1: dict, v2: dict) -> Dict[str, Any]:\n left = set(v1.keys()).difference(v2.keys())\n right = set(v2.keys()).difference(v1.keys())\n joint = set(v1.keys()) & set(v2.keys())\n\n # deal with __migrator: ordering\n if \"__migrator\" in v2:\n ordering = v2[\"__migrator\"].get(\"ordering\", {})\n operation = v2[\"__migrator\"].get(\"operation\")\n # handle special operations\n if operation:\n return VARIANT_OP[operation](v1, v2)\n else:\n ordering = {}\n\n # special keys\n if \"__migrator\" in right:\n right.remove(\"__migrator\")\n\n # special keys in joint\n special_variants = {}\n if \"pin_run_as_build\" in joint:\n # For run_as_build we enforce the migrator's pin\n # TODO: should this just be a normal ordering merge, favoring more exact pins?\n joint.remove(\"pin_run_as_build\")\n special_variants[\"pin_run_as_build\"] = {\n **v1[\"pin_run_as_build\"],\n **v2[\"pin_run_as_build\"],\n }\n\n if \"zip_keys\" in joint:\n # zip_keys is a bit weird to join on as we don't have a particularly good way of identifying\n # a block. Longer term having these be named blocks would make life WAY simpler\n # That does require changes to conda-build itself though\n #\n # A zip_keys block is deemed mergeable if zkₛ,ᵢ ⊂ zkₘ,ᵢ\n zk_out = []\n zk_l = {frozenset(e) for e in v1[\"zip_keys\"]}\n zk_r = {frozenset(e) for e in v2[\"zip_keys\"]}\n\n for zk_r_i in sorted(zk_r, key=lambda x: -len(x)):\n for zk_l_i in sorted(zk_l, key=lambda x: -len(x)):\n # Merge the longest common zk first\n if zk_l_i.issubset(zk_r_i):\n zk_l.remove(zk_l_i)\n zk_r.remove(zk_r_i)\n zk_out.append(zk_r_i)\n break\n else:\n # Nothing to do\n pass\n\n zk_out.extend(zk_l)\n zk_out.extend(zk_r)\n zk_out = sorted(\n [sorted(zk) for zk in zk_out], key=lambda x: (len(x), str(x))\n )\n\n joint.remove(\"zip_keys\")\n special_variants[\"zip_keys\"] = zk_out\n\n joint_variant = {}\n for k in joint:\n v_left, v_right = ensure_list(v1[k]), ensure_list(v2[k])\n joint_variant[k] = variant_key_add(\n k, v_left, v_right, ordering=ordering.get(k, None)\n )\n\n out = {\n **toolz.keyfilter(lambda k: k in left, v1),\n **toolz.keyfilter(lambda k: k in right, v2),\n **joint_variant,\n **special_variants,\n }\n\n return out",
"def merge(self,other):\n if self.empty: \n self.copy(other)\n return self\n elif other.empty:\n return self\n if(other.vmin < self.vmin):\n self.vmin = other.vmin\n if(other.vmax > self.vmax):\n self.vmax = other.vmax\n\n nA = float(self.vcount)\n nB = float(other.vcount)\n nAB = nA*nB\n nAA = float(self.vcountsq)\n nBB = float(other.vcountsq)\n nX = nA+nB\n nXX = nX**2 #nAA+nBB+2*nAB #nX**2 # actually (nA+nB)^2 = (nAA+nBB+2*nAB)\n nXXX = nXX*nX\n self.vcount = nX\n self.vcountsq = nXX\n\n self.vsum += other.vsum;\n\n # merge of mean and m2\n delta = other.vmean-self.vmean;\n delta2 = delta**2\n delta3 = delta**3\n delta4 = delta**4\n self.vmean += delta*nB/nA\n self.vm2 += other.vm2 + delta2*(nAB/nX)\n self.vm3 += other.vm3 + delta3*(nAB*(nA-nB))/nXX + 3*delta*(nA*other.vm2-nB*self.vm2)/nX\n self.vm4 += other.vm4 + delta4*(nAB*(nAA-nAB+nBB))/nXXX + 6*delta2*(nAA*other.vm2+nBB*self.vm2)/nXX + 4*delta*(nA*other.vm3-nB*self.vm3)/nX\n self.dirty = True\n return self",
"def join(self, other: Catalog) -> Catalog:\n assert len(self.intersection(other)) == 0\n return self.union(other)",
"def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)",
"def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False",
"def merge(*args):\n return _libsbml.Unit_merge(*args)",
"def merge(file_a, file_b):\n res = []\n a = read_bin_file_in_full(file_a)\n b = read_bin_file_in_full(file_b)\n pa = 0\n pb = 0\n while pa < len(a) and pb < len(b):\n if a[pa] < b[pb]:\n res.append(a[pa])\n pa += 1\n else:\n res.append(b[pb])\n pb += 1\n while pa < len(a):\n res.append(a[pa])\n pa += 1\n while pb < len(b):\n res.append(b[pb])\n pb += 1\n\n return write_to_temp(res)",
"def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)",
"def merge(a,b):\n c = a.copy()\n c.update(b)\n return c",
"def merge(self, source_vector_clock, source_id, destination_id):\n self.vectors[destination_id].merge(source_vector_clock.vectors[source_id])",
"def merge_clouds(pc1, pc2):\n pc_out = pc1\n for pt in pc2:\n pc_out.append(pt)\n\n return pc_out",
"def _merge(self):\n raise NotImplementedError",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def merge(self, first, second):\n return second if self.failed(first) else first",
"def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res",
"def merge_df(self, first_df, second_df):\n self.file = pd.merge(second_df, first_df, on=[(\"NAME\", \"TYPE\")])",
"def imprint_merge_each_group():\r\n \r\n G = cubit.get_entities(\"group\")\r\n for gid in G:\r\n vid = cubit.get_group_volumes(gid)\r\n if len(vid)>1:\r\n cubit.cmd(f\"imprint vol {list_to_str(vid)}\")\r\n cubit.cmd(f\"merge vol {list_to_str(vid)}\")",
"def _helm_merge(a, b):\n if not (isinstance(b, dict) and isinstance(a, dict)):\n # if either one is not a dict,\n # there's no merging to do: use 'b'\n return b\n for key, value in b.items():\n if key in a:\n a[key] = _helm_merge(a[key], value)\n else:\n a[key] = value\n return a",
"def merge(self, other_btree):\n pass",
"def volumes(self):"
]
| [
"0.58557945",
"0.5811954",
"0.5793941",
"0.57914466",
"0.56307393",
"0.5590437",
"0.5583113",
"0.55503875",
"0.5532684",
"0.54878396",
"0.54819626",
"0.54766107",
"0.54480654",
"0.5441496",
"0.542764",
"0.5426027",
"0.54059726",
"0.5367306",
"0.5359285",
"0.5351776",
"0.5347575",
"0.53410673",
"0.5336507",
"0.5336107",
"0.5329887",
"0.5328802",
"0.532762",
"0.5322807",
"0.53190905",
"0.5305875"
]
| 0.7455013 | 0 |
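For reference, a minimal runnable sketch of the bounding-box merge this row's positive document implements; the `Volume` class below is a hypothetical stand-in assumed only to expose a `get_corners()` accessor, not the source project's actual class:

```python
# Hypothetical Volume stand-in: an axis-aligned box given by two corner points.
class Volume:
    def __init__(self, index, lowercorner, uppercorner):
        self.index = index
        self.p1, self.p2 = tuple(lowercorner), tuple(uppercorner)

    def get_corners(self):
        return self.p1, self.p2


def merge_volumes(volume1, volume2):
    """Return the axis-aligned bounding box covering both input volumes."""
    low1, up1 = volume1.get_corners()
    low2, up2 = volume2.get_corners()
    lowercorner = tuple(min(a, b) for a, b in zip(low1, low2))
    uppercorner = tuple(max(a, b) for a, b in zip(up1, up2))
    return Volume(None, lowercorner, uppercorner)


# Two adjacent unit cubes merge into a single 1x1x2 box.
merged = merge_volumes(Volume(0, (0, 0, 0), (1, 1, 1)), Volume(1, (0, 0, 1), (1, 1, 2)))
print(merged.get_corners())  # ((0, 0, 0), (1, 1, 2))
```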
Alias of hypercubes_overlap. We do not verify that it is included but by definition of the problem if volume crosses outfile then volume in outfile. | def included_in(volume, outfile):
return hypercubes_overlap(volume, outfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True",
"def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass",
"def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th",
"def overlap_variants_and_motifs(motifs, variants, output_file: str):\r\n # TODO: if necessary to make BedTool again, change architecture, figure out why one file okay and the other not\r\n mot = pybedtools.BedTool(motifs)\r\n mot.intersect(variants, wo=True, header=True).saveas(output_file)\r\n return",
"def can_overlap(self):\n return False",
"def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)",
"def overlap(self, *args, type='bbox'):\n return self.phy2abs.overlap(*args, type=type)",
"def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass",
"def showIntersections(self, ifOverlapped=True):\n if ifOverlapped:\n combined = np.sum(self.Intersections2D)\n combined.show()\n else:\n for c2 in self.Intersections2D:\n c2.show()",
"def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;",
"def get_crossed_outfiles(buffer_index, buffers, outfiles):\n crossing = list()\n buffer_of_interest = buffers[buffer_index]\n for outfile in outfiles.values():\n if hypercubes_overlap(buffer_of_interest, outfile):\n crossing.append(outfile)\n return crossing",
"def overlap_conflict(out, *inputs):\n from . import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False",
"def overlap_measure(self, other_cube):\n return np.count_nonzero(np.logical_and(self.mask, other_cube.mask))",
"def arglocs_overlap(*args):\n return _ida_hexrays.arglocs_overlap(*args)",
"def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def do_overlap(ds,iterno,algo=\"FordRollett\",ignore=1,unit_weights=False,top=None,bottom=None,\n exact_angles=None,drop_frames='',drop_tubes = '', use_gains = [],do_sum=False,\n do_interp = False, dumpfile=None):\n import time\n from Reduction import overlap,interpolate\n # Get sensible values\n if top is None: top = ds.shape[1]-1\n if bottom is None: bottom = 0\n\n # Vertically integrate\n # Dimensions are step,vertical,tube\n\n b = ds[:,bottom:top,:].intg(axis=1).get_reduced()\n\n # Determine pixels per tube interval\n\n tube_pos = ds.axes[-1]\n if tube_pos.ndim == 2: #very old data, just take one slice\n tube_pos = tube_pos[0]\n tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)\n tube_steps = ds.axes[0]\n bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)\n pixel_step = int(round(tubesep/bin_size))\n bin_size = tubesep/pixel_step\n print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)\n dropped_frames = parse_ignore_spec(drop_frames)\n dropped_tubes = parse_ignore_spec(drop_tubes)\n\n # Drop frames from the end as far as we can\n\n for empty_no in range(b.shape[0]-1,0,-1):\n print \"Trying %d\" % empty_no\n if empty_no not in dropped_frames:\n break\n dropped_frames.remove(empty_no)\n print \"All frames after %d empty so dropped\" % empty_no\n b = b[:empty_no+1]\n\n # Do we need to add dummy missing frames?\n\n extra_steps = b.shape[0]%pixel_step\n if extra_steps > 0:\n start_drop = b.shape[0]\n # gumpy has no resize\n new_b = zeros([((b.shape[0]/pixel_step)+1)*pixel_step,b.shape[1]])\n new_b[:b.shape[0]] = b\n b = new_b\n extra_dropped_frames = range(start_drop,b.shape[0])\n print \"Filled out array from %d to %d with dummy frames\" % (start_drop,b.shape[0])\n dropped_frames |= set(extra_dropped_frames)\n else:\n extra_dropped_frames = []\n \n # Zero out dropped frames\n\n print 'Dropped frames: ' + `dropped_frames`\n b_zeroed = copy(b)\n\n # Make a simple array to work out which sectors are missing frames\n\n frame_check = array.ones(b.shape[0])\n\n # Zero out all matching steps\n\n all_zeroed = copy(b)\n region_starts = [a*pixel_step for a in range(b.shape[0]/pixel_step)]\n for frame_no in dropped_frames:\n b_zeroed[frame_no] = 0\n b_zeroed.var[frame_no] = 0\n dropped_step = frame_no%pixel_step\n ref_drop_steps = [r+dropped_step for r in region_starts]\n for drop_step in ref_drop_steps:\n frame_check[drop_step] = 0\n all_zeroed[drop_step] = 0\n all_zeroed.var[drop_step] = 0\n\n # Now drop out whole detectors\n\n for tube_no in dropped_tubes:\n b_zeroed[:,tube_no] = 0\n b_zeroed.var[:,tube_no] = 0\n all_zeroed[:,tube_no] = 0\n all_zeroed.var[:,tube_no] = 0\n\n # Interpolation. If requested, we first interpolate the data onto a regular angular grid,\n # which is the assumption underlying the regain calculation. 
However, as the deviations\n # from regularity are usually minor, this step can usually be skipped\n \n if do_interp:\n if exact_angles != None:\n h_correction = read_horizontal_corrections(exact_angles)\n else:\n h_correction = None\n \n all_zeroed = interpolate.interpolate(all_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n b_zeroed = interpolate.interpolate(b_zeroed,dropped_frames,tube_steps,tube_steps[0],\n bin_size,len(tube_pos),h_correction=h_correction)\n\n \n c = all_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n frame_check = frame_check.reshape([b.shape[0]/pixel_step,pixel_step])\n frame_sum = frame_check.intg(axis=1)\n print `b.shape` + \"->\" + `c.shape`\n print 'Relative no of frames: ' + `frame_sum`\n\n # Output the starting data for external use\n\n if dumpfile is not None:\n dump_tube_intensities(dumpfile,raw=b_zeroed)\n if len(use_gains)==0: #we have to calculate them\n if c.shape[0] == 1: #can't be done, there is no overlap\n return None,None,None,None,None\n if do_sum:\n # sum the individual unoverlapped sections. Reshape is required as the\n # intg function removes the dimension\n d = c.intg(axis=1).reshape([c.shape[0],1,c.shape[2]]) #array of [rangeno,stepno,tubeno]\n # normalise by the number of frames in each section\n else:\n d = c #no op\n # Note gumpy can't do transposes of more than two axes at once\n e = d.transpose((2,0)) #array of [tubeno,stepno,section]\n e = e.transpose((1,2)) #array of [tubeno,section,stepno]\n print \"Data shape: \" + repr(e.shape)\n print \"Check shape: \" + repr(frame_sum.shape)\n # create the mask: any values of zero are assumed to be incorrect and masked out\n pixel_mask = array.ones_like(e[ignore:])\n for one_tube in range(len(e[ignore:])):\n if not e[ignore+one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n gain,dd,interim_result,residual_map,chisquared,oldesds,first_ave,weights = \\\n iterate_data(e[ignore:],iter_no=iterno,unit_weights=unit_weights,pixel_mask=pixel_mask)\n else: #we have been provided with gains\n gain = use_gains\n chisquared=0.0\n # calculate errors based on full dataset\n # First get a full model\n reshape_ds = b_zeroed.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])\n start_ds = reshape_ds.transpose((2,0))[ignore:] #array of [tubeno,stepno,section]\n start_ds = start_ds.transpose((1,2))\n start_var = start_ds.var\n\n # Our new pixel mask has to have all of the steps in\n\n pixel_mask = array.ones_like(start_ds)\n for one_tube in range(len(start_ds)):\n if not start_ds[one_tube].any(): #all zero\n pixel_mask[one_tube] = 0 #mask it out\n\n # Normalise gains so that average is 1.0\n\n gain = gain*len(gain)/gain.sum()\n model,wd,model_var,esds = overlap.apply_gain(start_ds,1.0/start_var,gain,\n calc_var=True,bad_steps=dropped_frames,pixel_mask=pixel_mask)\n\n # model and model_var have shape tubeno*pixel_step + no_steps (see shift_tube_add_new)\n\n print 'Have full model and errors at %f' % time.clock()\n\n # step size could be less than pixel_step if we have a short non-overlap scan\n\n real_step = pixel_step\n if len(tube_steps)< pixel_step:\n real_step = len(tube_steps)\n # and we have to prune the output data too\n holeless_model = zeros([real_step*start_ds.shape[0]])\n holeless_var = zeros_like(holeless_model)\n for tube_set in range(start_ds.shape[0]):\n holeless_model[tube_set*real_step:(tube_set+1)*real_step]=model[tube_set*pixel_step:(tube_set+1)*pixel_step] \n 
holeless_var[tube_set*real_step:(tube_set+1)*real_step]=model_var[tube_set*pixel_step:(tube_set+1)*pixel_step] \n model = holeless_model\n model_var = holeless_var\n cs = Dataset(model)\n cs.var = model_var\n\n # Now build up the important information\n\n cs.title = ds.title\n cs.copy_cif_metadata(ds)\n\n # construct the axes\n\n if exact_angles is None or do_interp:\n axis = arange(len(model))\n new_axis = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size\n if not do_interp:\n axis_string = \"\"\"Following application of gain correction, two theta values were recalculated assuming a step size of %8.3f \n and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n axis_string = \"\"\"Gain correction was performed after interpolating observed values onto a\n regular angular grid with a step size of %8.3f and a tube separation of %8.3f starting at %f.\"\"\" % (bin_size,tubesep,ds.axes[0][0]+ignore*pixel_step*bin_size)\n else:\n new_axis = calculate_average_angles(tube_steps,exact_angles,pixel_step,tubesep,\n extra_dummy=extra_dropped_frames)\n # Remove ignored tubes\n \n new_axis = new_axis[ignore*real_step:]\n \n axis_string = \\\n \"\"\"Following application of gain correction, two theta values were recalculated using a tube separation of \n%8.3f and the recorded positions of the lowest angle tube, and then adding an average of the \nangular corrections for the tubes contributing to each two theta position.\"\"\" % (tubesep)\n cs.set_axes([new_axis],anames=['Two theta'],aunits=['Degrees'])\n print 'New axis goes from %f to %f in %d steps' % (new_axis[0],new_axis[-1],len(new_axis))\n print 'Total %d points in output data' % len(cs)\n # prepare info for CIF file\n import math\n detno = map(lambda a:\"%d\" % a,range(len(gain)))\n gain_as_strings = map(lambda a:\"%.4f\" % a,gain)\n gain_esd = [\"%.4f\" % a for a in esds]\n cs.harvest_metadata(\"CIF\").AddCifItem((\n ((\"_[local]_detector_number\",\"_[local]_refined_gain\",\"_[local]_refined_gain_esd\"),),\n ((detno,gain_as_strings,gain_esd),))\n )\n if len(use_gains)==0:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \"\"\" individual tube gains were iteratively refined using the Ford/Rollett algorithm (Acta Cryst. (1968) B24,293). \n Final gains are stored in the _[local]_refined_gain loop.\"\"\" + axis_string\n else:\n info_string = \"After vertical integration between pixels %d and %d,\" % (bottom,top) + \\\n \" individual tube gains were corrected based on a previous iterative refinement using the Ford/Rollett algorithm. The gains used\" + \\\n \"are stored in the _[local]_refined_gain loop.\" + axis_string\n cs.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)\n return cs,gain,esds,chisquared,c.shape[0]",
"def overlap_with(self, other):",
"def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass",
"def overlay_resources_score_motifs(motif_sites_input_file,\n motifs_overlapping_tracks_output_dir,\n chromatin_tracks_dir_path,\n chromatin_tracks_files):\n\n # for motif_sites_input_file in motif_sites_input_files:\n with open(motif_sites_input_file) as f:\n chr_n_file = f.readline().strip().split('\\t')[0].strip() + '.bed'\n # it is assumed for every motif file name there exists a matching file name in the chromatin_tracks_input_dir\n if chr_n_file in chromatin_tracks_files:\n motifs_overlapping_tracks_file = motifs_overlapping_tracks_output_dir + '/' + '.'.join(\n motif_sites_input_file.split('/')[-1].split('.')[0:-1]) + '_overlapping_tracks' + '.bed7'\n motifs_overlapping_tracks_file_tmp = motifs_overlapping_tracks_file + '_tmp'\n # create or overwrite output files\n if not os.path.exists(motifs_overlapping_tracks_file):\n \n motif_sites_input_file_sorted = motif_sites_input_file + '_sorted'\n chromatin_tracks_input_file = chromatin_tracks_dir_path +'/'+ chr_n_file\n chromatin_tracks_input_file_sorted = chromatin_tracks_input_file + '_sorted'\n \n print(\"intersecting: \" + motif_sites_input_file + ' and ' + chromatin_tracks_input_file)\n \n os.system(\"\"\"sort -k1,1 -k2,2n -k3,3n {} > {}\"\"\".format(motif_sites_input_file, motif_sites_input_file_sorted))\n os.system(\"\"\"sort -k1,1 -k2,2n -k3,3n {} > {}\"\"\".format(chromatin_tracks_input_file, chromatin_tracks_input_file_sorted))\n \n\n motif_sites_file_obj = BedTool(motif_sites_input_file_sorted)\n motif_sites_file_obj.map(BedTool(chromatin_tracks_input_file_sorted), c=4, o=['collapse']).saveas(motifs_overlapping_tracks_file_tmp)\n \n with open(motifs_overlapping_tracks_file_tmp, 'r') as infile, open(motifs_overlapping_tracks_file,\n 'w') as outfile:\n line = infile.readline()\n while line:\n\n sline = line.split('\\t')\n if len(sline) > 6:\n if sline[7] != '.' 
and sline[7] != \".\\n\":\n my_list = sline[7].split(',')\n cell_assay_values_dict_ChromHMM = {}\n cell_assay_values_dict_cCRE = {}\n cell_assay_values_dict_IndexDHS = {}\n cell_assay_values_dict_RegElem = {}\n cell_assay_values_dict_DNaseq = {}\n elem_list = []\n for elem in my_list:\n # TODO: check if statement below\n if elem.__contains__('#'):\n cell_value = elem.split('#')[0]\n assay_value = elem.split('#')[1]\n if len(elem.split('#')) > 2:\n state_value = elem.split('#')[2].rstrip(\"\\n\")\n\n if assay_value == \"ChromHMM\":\n if cell_value not in list(cell_assay_values_dict_ChromHMM.keys()):\n cell_assay_values_dict_ChromHMM[cell_value] = []\n cell_assay_values_dict_ChromHMM[cell_value].append(state_value)\n\n elif assay_value == \"cCRE\":\n if cell_value not in list(cell_assay_values_dict_cCRE.keys()):\n cell_assay_values_dict_cCRE[cell_value] = []\n cell_assay_values_dict_cCRE[cell_value].append(state_value)\n\n elif assay_value == \"IndexDHS\":\n if cell_value not in list(cell_assay_values_dict_IndexDHS.keys()):\n cell_assay_values_dict_IndexDHS[cell_value] = []\n cell_assay_values_dict_IndexDHS[cell_value].append(state_value)\n\n elif assay_value == \"RegElem\":\n if cell_value not in list(cell_assay_values_dict_RegElem.keys()):\n cell_assay_values_dict_RegElem[cell_value] = []\n cell_assay_values_dict_RegElem[cell_value].append(state_value)\n\n elif assay_value == \"DNase-seq\":\n if cell_value not in list(cell_assay_values_dict_DNaseq.keys()):\n cell_assay_values_dict_DNaseq[cell_value] = []\n cell_assay_values_dict_DNaseq[cell_value].append(float(state_value))\n\n else:\n elem_list.append(elem.rstrip(\"\\n\"))\n\n for cell in cell_assay_values_dict_ChromHMM:\n elem_list.append(cell + \"#ChromHMM#\" +\n Counter(cell_assay_values_dict_ChromHMM[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_cCRE.keys()):\n elem_list.append(\n cell + \"#cCRE#\" + Counter(cell_assay_values_dict_cCRE[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_IndexDHS.keys()):\n elem_list.append(cell + \"#IndexDHS#\" +\n Counter(cell_assay_values_dict_IndexDHS[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_RegElem.keys()):\n elem_list.append(cell + \"#RegElem#\" +\n Counter(cell_assay_values_dict_RegElem[cell]).most_common(1)[0][0])\n\n for cell in list(cell_assay_values_dict_DNaseq.keys()):\n elem_list.append(\n cell + \"#DNase-seq#\" + str(max(cell_assay_values_dict_DNaseq[cell])))\n\n outfile.write('\\t'.join(sline[0:7]) + '\\t' + ','.join(elem_list) + '\\n')\n\n line = infile.readline()\n\n os.remove(motifs_overlapping_tracks_file_tmp)\n os.remove(motif_sites_input_file_sorted)\n os.remove(chromatin_tracks_input_file_sorted)\n\n print(\"Finished intersecting: \" + motif_sites_input_file + ' and ' + chromatin_tracks_input_file)\n else:\n print(\"Use existing data files in \" + motifs_overlapping_tracks_file)\n else:\n print(\"Specified chromatin track file \" + chr_n_file + \" cannot be found and will be ignored.\")\n return None\n cleanup()\n return motifs_overlapping_tracks_file",
"def write_to_outfile(involume, outvolume, data, outfiles_partition, outdir_path, O, file_manager, addition, tracker):\n lowcorner, upcorner = get_overlap_subarray(involume, outvolume) # find subarray crossing both files in the basis of the original image\n overlap_vol = get_overlap_volume(involume, outvolume)\n overlap_shape = overlap_vol.get_shape()\n if DONT_WRITE:\n tracker.add_volume(overlap_vol)\n\n nb_outfile_seeks_tmp = 0\n s = overlap_shape\n if s[2] != O[2]:\n nb_outfile_seeks_tmp += s[0]*s[1]\n elif s[1] != O[1]:\n nb_outfile_seeks_tmp += s[0]\n elif s[0] != O[0]:\n nb_outfile_seeks_tmp += 1\n else:\n pass\n\n if DONT_WRITE:\n print(f\"Overlap shape: {overlap_shape}\")\n print(f\"Outfile shape: {O}\")\n print(f\"Number seeks: {nb_outfile_seeks_tmp}\")\n return overlap_shape, 0, nb_outfile_seeks_tmp\n\n slices = [(lowcorner[0], upcorner[0]), (lowcorner[1], upcorner[1]), (lowcorner[2], upcorner[2])]\n offset_in = involume.get_corners()[0] # lower corner\n offset_out = outvolume.get_corners()[0]\n\n slices_in_infile = [ # convert corners in the basis of input file\n (lowcorner[0]-offset_in[0], upcorner[0]-offset_in[0]), \n (lowcorner[1]-offset_in[1], upcorner[1]-offset_in[1]), \n (lowcorner[2]-offset_in[2], upcorner[2]-offset_in[2])]\n \n slices_in_outfile = [ # convert corners in the basis of output file\n (lowcorner[0]-offset_out[0], upcorner[0]-offset_out[0]), \n (lowcorner[1]-offset_out[1], upcorner[1]-offset_out[1]), \n (lowcorner[2]-offset_out[2], upcorner[2]-offset_out[2])]\n\n if DEBUG_LOCAL:\n logger.debug(f\"[debug] extracting {s[0][0]}:{s[0][1]}, {s[1][0]}:{s[1][1]}, {s[2][0]}:{s[2][1]} from input file\")\n logger.debug(f\"[debug] inserting {s2[0][0]}:{s2[0][1]}, {s2[1][0]}:{s2[1][1]}, {s2[2][0]}:{s2[2][1]} into output file {out_filename}\")\n\n s = slices_in_infile\n subarr_data = data[s[0][0]:s[0][1],s[1][0]:s[1][1],s[2][0]:s[2][1]] # extract subarr from input file's data \n\n _3d_pos = numeric_to_3d_pos(outvolume.index, outfiles_partition, order='C')\n i, j, k = _3d_pos\n\n if addition:\n subarr_data = subarr_data + 1\n\n global outdirs_dict, outdir_index\n\n if (i, j, k) in outdirs_dict.keys():\n outdir_path = outdirs_dict[(i, j, k)]\n print(f\"Writing at: {outdir_path}\")\n else:\n outdir_path = '/disk' + str(outdir_index) + '/gtimothee/output'\n outdirs_dict[(i, j, k)] = outdir_path\n outdir_index += 1\n if outdir_index == 6:\n outdir_index = 0\n\n print(f\"Writing at: {outdir_path}\")\n print(f\"Increasing writing index: {outdir_index}\")\n\n t2 = time.time()\n if not DONT_WRITE:\n file_manager.write_data(i, j, k, outdir_path, subarr_data, slices_in_outfile, O)\n t2 = time.time() - t2\n \n if DEBUG_LOCAL: \n file_manager.test_write(outfile_path, slices_in_outfile, subarr_data)\n\n return overlap_shape, t2, nb_outfile_seeks_tmp",
"def overlap(array1,array2,thresh=0.05e0):\r\n arrayout = array1 * array2\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=np.bool)\r\n return arrayout",
"def polyUVOverlap(*args, nonOverlappingComponents: bool=True, overlappingComponents: bool=True,\n **kwargs)->List[selectionItem]:\n pass",
"def parcel_overlap(parcellation1, parcellation2, outpath):\n p1_dat = nib.load(parcellation1).get_data()\n p2_dat = nib.load(parcellation2).get_data()\n p1regs = np.unique(p1_dat)\n p1regs = p1regs[p1regs > 0]\n p2regs = np.unique(p2_dat)\n\n p1n = get_filename(parcellation1)\n p2n = get_filename(parcellation2)\n\n overlapdat = lil_matrix((p1regs.shape[0], p2regs.shape[0]), dtype=np.float32)\n for p1idx, p1reg in enumerate(p1regs):\n p1seq = p1_dat == p1reg\n N = p1seq.sum()\n poss_regs = np.unique(p2_dat[p1seq])\n for p2idx, p2reg in enumerate(p2regs):\n if p2reg in poss_regs:\n # percent overlap is p1seq and'd with the anatomical region voxelspace, summed and normalized\n pover = np.logical_and(p1seq, p2_dat == p2reg).sum() / float(N)\n overlapdat[p1idx, p2idx] = pover\n\n outf = op.join(outpath, \"{}_{}.csv\".format(p1n, p2n))\n with open(outf, \"w\") as f:\n p2str = [\"%s\" % x for x in p2regs]\n f.write(\"p1reg,\" + \",\".join(p2str) + \"\\n\")\n for idx, p1reg in enumerate(p1regs):\n datstr = [\"%.4f\" % x for x in overlapdat[idx,].toarray()[0,]]\n f.write(str(p1reg) + \",\" + \",\".join(datstr) + \"\\n\")\n f.close()\n return",
"def example3_half_overlap(GeomCA_parameters):\n num_pts = 100\n GeomCA_parameters['experiment_filename_prefix'] = 'problematic_half_overlap_'\n subfolder = 'problematic_half_overlap'\n GeomCA_parameters['pr_comp_quality_threshold'] = 0.3\n GeomCA_parameters['pr_comp_consistency_threshold'] = 0.7\n R = np.concatenate([circle(n=num_pts, r=1.5), circle(n=int(num_pts/5), r=0.3)])\n E = np.concatenate([half_circle(n=int((2*num_pts)/3), r=1.4, noise=0.1), circle(n=int(num_pts/8), r=0.2, noise=0.1)])\n return run_GeomCA_and_visualize(R, E, subfolder, GeomCA_parameters)",
"def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap",
"def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps",
"def merge_overwrap(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n for j in range(Ly):\n cff = z_u_w[j,N] - z_u_w[j,0]\n if self.hbls[j] + self.hbbl[j] > cff:\n self.hbls[j] = cff\n self.hbbl[j] = cff",
"def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True",
"def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True",
"def crossing(self, *args):\n return self.overlap(*args, type='point')"
]
| [
"0.6887589",
"0.6100261",
"0.60348684",
"0.5958832",
"0.58412147",
"0.57699",
"0.5757132",
"0.5709748",
"0.56067",
"0.55064774",
"0.5492626",
"0.5490349",
"0.5449239",
"0.5437164",
"0.5422487",
"0.54063815",
"0.54039466",
"0.5402923",
"0.5399976",
"0.5399588",
"0.5388516",
"0.5381361",
"0.53569764",
"0.5326242",
"0.5313392",
"0.5312664",
"0.5289396",
"0.52832454",
"0.5267875",
"0.5225853"
]
| 0.83019495 | 0 |
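Again for reference, a hedged sketch of the strict per-dimension overlap test that the top-ranked negative spells out and that `included_in` merely aliases; `Box` is a hypothetical stand-in type used only for this illustration:

```python
# Hypothetical stand-in exposing the get_corners() interface assumed above.
class Box:
    def __init__(self, lowercorner, uppercorner):
        self.p1, self.p2 = tuple(lowercorner), tuple(uppercorner)

    def get_corners(self):
        return self.p1, self.p2


def hypercubes_overlap(h1, h2):
    """Strictly-open overlap test along every dimension."""
    low1, up1 = h1.get_corners()
    low2, up2 = h2.get_corners()
    return all(up1[i] > low2[i] and up2[i] > low1[i] for i in range(len(up1)))


def included_in(volume, outfile):
    # Alias: under the problem's assumptions, any volume that crosses the
    # output file's region is treated as contained in it.
    return hypercubes_overlap(volume, outfile)


print(included_in(Box((0, 0, 0), (2, 2, 2)), Box((1, 1, 1), (5, 5, 5))))  # True
print(included_in(Box((0, 0, 0), (1, 1, 1)), Box((1, 1, 1), (5, 5, 5))))  # False: faces only touch
```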
Add volume information to dictionary associating output file index to | def add_to_array_dict(array_dict, outfile, volume):
if (not isinstance(outfile.index, int)
or not isinstance(volume, Volume)
or not isinstance(outfile, Volume)):
raise TypeError()
if not outfile.index in array_dict.keys():
array_dict[outfile.index] = list()
array_dict[outfile.index].append(volume) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")",
"def volumes(self):",
"def generate_volume_info(self, NAME, path):\n info = {'tags': [], 'name': NAME, 'path': path, 'AttachedToVm': [],\n 'State': 'available', 'machine_path': None,\n 'time': datetime.datetime.now()}\n return info",
"def volume(self):\n return {'lvad': self._v}",
"def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)",
"def generateInfoVolumes(regions):\n print \"\\nWriting volumes info to output file %s\" % volumes_data_output_file\n with open(volumes_data_output_file, 'w') as f1:\n f1.write(\"VOLUMES\\n\")\n f1.write(\n \"Name\\tvolume_ID\\tKEEP-tag_of_volume\\tKEEP-tag_of_instance\\tproduction?\\tvolume_attachment_state\\tassociated_instance\\tinstance_state\\tsize\\tcreate_time\\tregion\\tzone\\tassociated_snapshot\\n\\n\")\n for r in regions:\n volumes = getVolumes(r)\n print \".\" # give some feedback to the user\n for v in volumes:\n f1.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,\n v.create_time, v.region.name, v.zone, v.snapshot_id))",
"def add_volume_info(subject, surface, subjects_dir, volume='T1'):\n import os.path as op\n from mne.bem import _extract_volume_info\n from mne.surface import (read_surface, write_surface)\n subject_dir = op.join(subjects_dir, subject)\n mri_dir = op.join(subject_dir, 'mri')\n T1_mgz = op.join(mri_dir, volume + '.mgz')\n new_info = _extract_volume_info(T1_mgz)\n print(new_info.keys())\n rr, tris, volume_info = read_surface(surface,\n read_metadata=True)\n\n # volume_info.update(new_info) # replace volume info, 'head' stays\n print(volume_info.keys())\n import numpy as np\n if 'head' not in volume_info.keys():\n volume_info['head'] = np.array([2, 0, 20], dtype=np.int32)\n write_surface(surface, rr, tris, volume_info=volume_info)",
"def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v",
"def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v",
"def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))",
"def volumes(self) -> dict:\n return self.data[\"volumes\"]",
"def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list",
"def _map_volume(self, volume, volume_path, connector=None):\n # Create NVME subsystem for previously created LV\n nqn = self._get_target_nqn(volume.id, connector)\n try:\n uuid = self._get_nvme_uuid(volume)\n\n ns_id = self._ensure_subsystem_exists(nqn, volume_path, uuid)\n\n self._ensure_port_exports(nqn, self.target_ips, self.target_port,\n self.nvme_transport_type,\n self.nvmet_port_id)\n except Exception:\n LOG.error('Failed to add subsystem: %s', nqn)\n raise NVMETTargetAddError(subsystem=nqn)\n\n LOG.info('Subsystem %s now exported on port %s', nqn, self.target_port)\n return nqn, ns_id",
"def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory",
"def get_complete_volume_info(vol_name, vol_info_dict=None):\n return_dict = {}\n try:\n if not vol_info_dict:\n vol_info_dict, err = get_basic_volume_info(vol_name)\n if err:\n raise Exception(err)\n\n return_dict = vol_info_dict\n\n vol_status_dict = {}\n if vol_info_dict['status'] == 1:\n vol_status_dict, err = get_volume_status(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n if vol_info_dict['status'] == 1:\n if vol_status_dict:\n # Add the status and usage info\n return_dict.update(vol_status_dict)\n vol_process_dict, err = get_volume_process_status(\n vol_name, vol_info_dict, vol_status_dict)\n if err:\n raise Exception(err)\n\n return_dict['processes_ok'] = vol_process_dict['processes_ok']\n\n for br in return_dict['brick_status']:\n if br in vol_process_dict['brick_status']:\n return_dict['brick_status'][br].update(\n vol_process_dict['brick_status'][br])\n\n quotas, err = get_volume_quota(vol_name, vol_info_dict)\n if err:\n raise Exception(err)\n return_dict['quotas'] = quotas\n\n except Exception, e:\n return None, 'Error getting complete volume information : %s' % str(e)\n else:\n return return_dict, None",
"def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})",
"def derive_newrelic_volume(self):\n # read and write volume\n self.update_metric(\"newrelic/volume_reads\", self.sum_of([\"status/com_select\", \"status/qcache_hits\"]))\n self.update_metric(\"newrelic/volume_writes\", self.sum_of([\"status/com_insert\", \"status/com_insert_select\",\n \"status/com_update\", \"status/com_update_multi\",\n \"status/com_delete\", \"status/com_delete_multi\",\n \"status/com_replace\", \"status/com_replace_select\"]))",
"def add_volume(self, volume_block, apfs_tree):\n\n # get volume superblock\n block = self.read_block(volume_block)\n block_map = block.body.block_map_block # mapping btree\n root_dir_id = block.body.root_dir_id # root dir id\n if self.verbose:\n vol_desc = \"%s (volume, Mapping-Btree: %d, Rootdir-ID: %d\" % (\n block.body.name, block_map, root_dir_id)\n else:\n vol_desc = block.body.name\n\n # get volume btree\n block = self.read_block(block_map)\n\n # get root btree node and parse it with all its children, collecting dir entries\n block = self.read_block(block.body.root)\n entries = self.get_entries(block)\n\n # create a tree from the found dir entries\n vol_node = Node(vol_desc, apfs_tree)\n self.list_children(1, entries, vol_node)",
"def brain_vol_info():\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Query brain volume data.\")\n parser.add_argument(\"volume\", help=\"The volume file to load. Should be in mgh, mgz or nifti format.\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-c', '--crs', nargs='*', help=\"The query voxel, defined as a 0-based index into the volume. For a 3D volume, this would be 3 integers which represent the CRS (column, row, slice) of the voxel, like 128 128 128.\")\n group.add_argument(\"-f\", \"--crs-file\", help=\"A file containing the voxels to query, one per line. A voxel should be given by zero-based indices into each dimension of the volume, e.g., '0 23 188'.\")\n group.add_argument('-a', '--all-values', help=\"Instead of returning the value for a single voxel, return all voxel values which occur in the volume. Forces integer values (by rounding).\", action=\"store_true\")\n group.add_argument('-l', '--all-value-counts', help=\"Instead of returning the value for a single voxel, return the counts for all voxel values which occur in the volume. The order of the counts is guaranteed to be identical to the order of the output when running with '-a'. Forces integer values (by rounding).\", action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Increase output verbosity.\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--separator\", help=\"Output separator (between vertex coords / indices).\", default=\" \")\n args = parser.parse_args()\n\n volume_file = args.volume\n verbose = args.verbose\n sep = args.separator\n\n vol_data = nib.load(volume_file).get_data()\n if verbose:\n print(\"---Brain Vol Info---\")\n print(\"Volume has %d dimensions, shape %s and data type %s. It contains %d voxels.\" % (len(vol_data.shape), vol_data.shape, vol_data.dtype, len(np.ravel(vol_data))))\n\n voxel_value_print_format = \"%f\"\n if np.issubdtype(vol_data.dtype, np.integer):\n voxel_value_print_format = \"%d\"\n\n if args.all_values or args.all_value_counts:\n if verbose:\n print(\"NOTE: This mode treats the intensity values in the volume as integers. You should only use it if that is suitable for the input volume.\")\n voxel_value_print_format = \"%d\"\n vol_data = np.rint(vol_data).astype(int) # Force integer values. For floats, you would get as many values of there are voxels, and this does not make sense.\n vol_data_flat = np.ravel(vol_data)\n occuring_values = dict()\n for value in vol_data_flat:\n if value in occuring_values:\n occuring_values[value] = occuring_values[value] + 1\n else:\n occuring_values[value] = 1\n if args.all_values:\n if verbose:\n print(\"Printing all %d different intensity values that occur within the volume.\" % (len(occuring_values)))\n print(sep.join([str(k) for k in sorted(occuring_values.keys())]))\n else:\n if verbose:\n print(\"Printing the counts for the %d different intensity values that occur within the volume. Sum of counts is %d.\" % (len(occuring_values), sum(occuring_values.values())))\n print(sep.join([str(pair[1]) for pair in sorted(occuring_values.items(), key=lambda pair: pair[0])]))\n\n else:\n if args.crs:\n voxel_index = tuple([int(x) for x in args.crs])\n voxel_display_string = \" \".join(args.crs)\n if verbose:\n print(\"Received 1 voxel index (with %d dimensions) from the command line. 
Printing intensity value of the voxel '%s' in the volume.\" % (len(voxel_index), voxel_display_string))\n if len(voxel_index) != len(vol_data.shape):\n warnings.warn(\"Dimension mismatch: Received query voxel with %d dimenions, but the volume has %d.\" % (len(voxel_index), len(vol_data.shape)))\n print(voxel_value_print_format % (vol_data[voxel_index]))\n else:\n voxel_indices = nit.load_voxel_indices(args.crs_file)\n voxel_values = []\n if voxel_indices.shape[1] != len(vol_data.shape):\n warnings.warn(\"Dimension mismatch: Received query voxels with %d dimensions, but the volume has %d.\" % (voxel_indices.shape[1], len(vol_data.shape)))\n if verbose:\n print(\"Received %d voxel indices (with %d dimensions) from file '%s'. Printing their intensity values in the volume.\" % (voxel_indices.shape[0], voxel_indices.shape[1], args.crs_file))\n for voxel_index in voxel_indices:\n voxel_index = tuple(voxel_index.tolist())\n voxel_values.append(vol_data[voxel_index])\n print(sep.join([str(v) for v in voxel_values]))\n\n sys.exit(0)",
"def get_volumes_metadata(cls, cluster):\n def _get_volumes_ids(instance):\n return [v['id']\n for v in instance.volumes_metadata.get('volumes', [])]\n\n volumes_metadata = {\n 'volumes': [],\n 'volumes_roles_mapping': {},\n 'rule_to_pick_boot_disk': [],\n }\n\n cluster_volumes_ids = _get_volumes_ids(cluster)\n release_volumes_ids = _get_volumes_ids(cluster.release)\n processed_volumes = {}\n\n enabled_plugins = ClusterPlugin.get_enabled(cluster.id)\n for plugin_adapter in map(wrap_plugin, enabled_plugins):\n metadata = plugin_adapter.volumes_metadata\n\n for volume in metadata.get('volumes', []):\n volume_id = volume['id']\n for owner, volumes_ids in (('cluster', cluster_volumes_ids),\n ('release', release_volumes_ids)):\n if volume_id in volumes_ids:\n raise errors.AlreadyExists(\n 'Plugin {0} is overlapping with {1} '\n 'by introducing the same volume with '\n 'id \"{2}\"'.format(plugin_adapter.full_name,\n owner,\n volume_id)\n )\n elif volume_id in processed_volumes:\n raise errors.AlreadyExists(\n 'Plugin {0} is overlapping with plugin {1} '\n 'by introducing the same volume with '\n 'id \"{2}\"'.format(\n plugin_adapter.full_name,\n processed_volumes[volume_id],\n volume_id\n )\n )\n\n processed_volumes[volume_id] = plugin_adapter.full_name\n\n volumes_metadata.get('volumes_roles_mapping', {}).update(\n metadata.get('volumes_roles_mapping', {}))\n volumes_metadata.get('volumes', []).extend(\n metadata.get('volumes', []))\n volumes_metadata.get('rule_to_pick_boot_disk', []).extend(\n metadata.get('rule_to_pick_boot_disk', []))\n\n return volumes_metadata",
"def export_configmap_from_volume(volume_dict, section):\n # only export configmap in case of volume with 'File' type\n if volume_dict['pathType'] != 'File':\n return 0\n _vol = volume_dict.copy()\n _vol['section'] = section\n configmap_name = sp.get_k8s_configmap_name(_vol)\n target_filename = volume_dict['hostPath']\n\n LOG.debug(\"Exporting k8s configmap '%s'.\", configmap_name)\n cmd = [\"kubectl\", \"--kubeconfig=/etc/kubernetes/admin.conf\", \"get\",\n \"cm\", \"-n\", \"kube-system\", configmap_name, \"-o=jsonpath={.data.*}\"]\n return _export_k8s_configmap(target_filename, cmd)",
"def add_volume(self, volume: 'Volume'):\n self.volumes.append(volume)",
"def _attach_volume(self):\n return []",
"def _read_volume_info(fobj):\n volume_info = OrderedDict()\n head = np.fromfile(fobj, '>i4', 1)\n if not np.array_equal(head, [20]): # Read two bytes more\n head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])\n if not np.array_equal(head, [2, 0, 20]) and not np.array_equal(head, [2, 1, 20]):\n warnings.warn(\"Unknown extension code.\")\n return volume_info\n head = [2, 0, 20]\n\n volume_info['head'] = head\n for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',\n 'zras', 'cras']:\n pair = fobj.readline().decode('utf-8').split('=')\n if pair[0].strip() != key or len(pair) != 2:\n raise IOError('Error parsing volume info.')\n if key in ('valid', 'filename'):\n volume_info[key] = pair[1].strip()\n elif key == 'volume':\n volume_info[key] = np.array(pair[1].split()).astype(int)\n else:\n volume_info[key] = np.array(pair[1].split()).astype(float)\n # Ignore the rest\n return volume_info",
"def get_volume_info(self):\n if self.issue:\n return f\"{self.volume} ({self.issue}), {self.pages}\"\n else:\n return f\"{self.volume}, {self.pages}\"",
"def _read_volume_info(fobj):\n volume_info = OrderedDict()\n head = np.fromfile(fobj, '>i4', 1)\n if not np.array_equal(head, [20]): # Read two bytes more\n head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])\n if not np.array_equal(head, [2, 0, 20]):\n warnings.warn(\"Unknown extension code.\")\n return volume_info\n\n volume_info['head'] = head\n for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',\n 'zras', 'cras']:\n pair = fobj.readline().decode('utf-8').split('=')\n if pair[0].strip() != key or len(pair) != 2:\n raise IOError('Error parsing volume info.')\n if key in ('valid', 'filename'):\n volume_info[key] = pair[1].strip()\n elif key == 'volume':\n volume_info[key] = np.array(pair[1].split()).astype(int)\n else:\n volume_info[key] = np.array(pair[1].split()).astype(float)\n # Ignore the rest\n return volume_info",
"def update_volume_tag(self, info, key, value):\n keys = []\n for tag in info[0]['tags']:\n if key == list(tag.keys())[0]:\n if len(value) == 0:\n info[0]['tags'].remove(tag)\n keys.append(list(tag.keys())[0])\n else:\n tag.update({key: value})\n keys.append(list(tag.keys())[0])\n if key not in keys:\n tag = {key: value}\n info[0]['tags'].append(tag)\n info[0]['time'] = datetime.datetime.now()\n return info",
"def update_volumes():\n print 'do something useful here'",
"def _update_volume_stats(self):\n self._ensure_shares_mounted()\n data = {}\n lcfg = self.configuration\n backend_name = self.configuration.safe_get('volume_backend_name')\n data['volume_backend_name'] = backend_name or self.__class__.__name__\n data['vendor_name'] = 'Oracle'\n data['driver_version'] = self.VERSION\n data['storage_protocol'] = self.protocol\n\n asn = self.zfssa.get_asn()\n data['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share)\n\n free, used = self._get_share_capacity_info()\n capacity = float(free) + float(used)\n ratio_used = used / capacity\n\n data['QoS_support'] = False\n data['reserved_percentage'] = 0\n\n used_percentage_limit = 100 - self.configuration.reserved_percentage\n used_ratio_limit = used_percentage_limit / 100.0\n if (ratio_used > used_ratio_limit or\n ratio_used >= self.configuration.max_over_subscription_ratio):\n data['reserved_percentage'] = 100\n\n data['total_capacity_gb'] = float(capacity) / units.Gi\n data['free_capacity_gb'] = float(free) / units.Gi\n\n share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,\n lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool)\n\n data['zfssa_compression'] = share_details['compression']\n data['zfssa_encryption'] = share_details['encryption']\n data['zfssa_logbias'] = share_details['logbias']\n data['zfssa_poolprofile'] = pool_details['profile']\n data['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes)\n\n self._stats = data"
]
| [
"0.7031723",
"0.6087027",
"0.5982795",
"0.59471434",
"0.59254646",
"0.5894586",
"0.57727504",
"0.56557536",
"0.56557536",
"0.56557536",
"0.56455255",
"0.56353813",
"0.5634556",
"0.5623358",
"0.5588118",
"0.55815965",
"0.54794145",
"0.54393363",
"0.5389895",
"0.5359095",
"0.5340111",
"0.53239906",
"0.5310611",
"0.5302669",
"0.52950746",
"0.5246013",
"0.52450734",
"0.5217382",
"0.5217166",
"0.52098924"
]
| 0.63656837 | 1 |
From a dictionary of Volumes, creates a dictionary of lists of slices. The new arrays_dict associates each output file with the volumes that must be written to it at a given time. | def clean_arrays_dict(arrays_dict):
for k in arrays_dict.keys():
volumes_list = arrays_dict[k]
arrays_dict[k] = [convert_Volume_to_slices(v) for v in volumes_list] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_to_array_dict(array_dict, outfile, volume):\n if (not isinstance(outfile.index, int) \n or not isinstance(volume, Volume) \n or not isinstance(outfile, Volume)):\n raise TypeError()\n\n if not outfile.index in array_dict.keys():\n array_dict[outfile.index] = list()\n array_dict[outfile.index].append(volume)",
"def from_array_dict(cls, in_dict):\n kws = {}\n for k in ['material', 'vertices', 'params', 'normals', 'texcoords',\n 'lines', 'faces', 'points', 'curves', 'curve2Ds',\n 'surfaces']:\n if k in in_dict:\n kws[k] = copy.deepcopy(in_dict[k])\n if isinstance(kws.get('vertices', None), np.ndarray):\n old_vert = kws['vertices']\n nvert = old_vert.shape[1]\n assert(nvert in (3, 4))\n kws['vertices'] = [\n {k: old_vert[i, j] for j, k in enumerate('xyzw'[:nvert])\n if ((j < 3) or (not np.isnan(old_vert[i, j])))}\n for i in range(old_vert.shape[0])]\n if isinstance(in_dict.get('vertex_colors', None), np.ndarray):\n old_colr = in_dict['vertex_colors']\n assert(old_colr.shape == (len(kws['vertices']), 3))\n for i in range(old_colr.shape[0]):\n for j, k in enumerate(['red', 'green', 'blue']):\n kws['vertices'][i][k] = old_colr[i, j]\n if isinstance(kws.get('params', None), np.ndarray):\n old_parm = kws['params']\n nparm = old_parm.shape[1]\n assert(nparm in [2, 3])\n kws['params'] = [\n {k: old_parm[i, j] for j, k in enumerate('uvw'[:nparm])\n if ((j < 2) or (not np.isnan(old_parm[i, j])))}\n for i in range(old_parm.shape[0])]\n if isinstance(kws.get('normals', None), np.ndarray):\n old_norm = kws['normals']\n assert(old_norm.shape[1] == 3)\n kws['normals'] = [\n {k: old_norm[i, j] for j, k in enumerate('ijk')}\n for i in range(old_norm.shape[0])]\n if isinstance(kws.get('texcoords', None), np.ndarray):\n old_texc = kws['texcoords']\n ntexc = old_texc.shape[1]\n assert(ntexc in [1, 2, 3])\n kws['texcoords'] = [\n {k: old_texc[i, j] for j, k in enumerate('uvw'[:ntexc])\n if ((j < 1) or (not np.isnan(old_texc[i, j])))}\n for i in range(old_texc.shape[0])]\n # Composites of above\n if isinstance(kws.get('lines', None), np.ndarray):\n old_edge = kws['lines']\n assert(old_edge.shape[1] == 2)\n kws['lines'] = [\n [{'vertex_index': np.int32(old_edge[i, j])}\n for j in range(old_edge.shape[1])\n if (not np.isnan(old_edge[i, j]))]\n for i in range(old_edge.shape[0])]\n if isinstance(kws.get('faces', None), np.ndarray):\n old_face = kws['faces']\n assert(old_face.shape[1] >= 3)\n kws['faces'] = [\n [{'vertex_index': np.int32(old_face[i, j])}\n for j in range(old_face.shape[1])\n if (not np.isnan(old_face[i, j]))]\n for i in range(old_face.shape[0])]\n if isinstance(in_dict.get('face_texcoords', None), np.ndarray):\n old_texc = in_dict['face_texcoords']\n assert(old_texc.shape[0] == len(kws.get('faces', [])))\n for i in range(old_texc.shape[0]):\n for j in range(old_texc.shape[1]):\n if not np.isnan(old_texc[i, j]):\n kws['faces'][i][j]['texcoord_index'] = np.int32(\n old_texc[i, j])\n if isinstance(in_dict.get('face_normals', None), np.ndarray):\n old_norm = in_dict['face_normals']\n assert(old_norm.shape[0] == len(kws.get('faces', [])))\n for i in range(old_norm.shape[0]):\n for j in range(old_norm.shape[1]):\n if not np.isnan(old_norm[i, j]):\n kws['faces'][i][j]['normal_index'] = np.int32(\n old_norm[i, j])\n if isinstance(kws.get('points', None), np.ndarray):\n old_pnts = kws['points']\n kws['points'] = [\n [np.int32(old_pnts[i, j]) for j in range(old_pnts.shape[1])\n if (not np.isnan(old_pnts[i, j]))]\n for i in range(old_pnts.shape[0])]\n if isinstance(kws.get('curves', None), np.ndarray):\n old_curv = kws['curves']\n kws['curves'] = [\n {'vertex_indices': [\n np.int32(old_curv[i, j]) for j in range(old_curv.shape[1])\n if (not np.isnan(old_curv[i, j]))]}\n for i in range(old_curv.shape[0])]\n assert('curve_params' in in_dict)\n if isinstance(in_dict['curve_params'], np.ndarray):\n old_parm = 
in_dict['curve_params']\n assert(old_parm.shape == (len(kws['curves']), 2))\n for i in range(old_parm.shape[0]):\n kws['curves'][i]['starting_param'] = old_parm[i, 0]\n kws['curves'][i]['ending_param'] = old_parm[i, 1]\n if isinstance(kws.get('curve2Ds', None), np.ndarray):\n old_curv = kws['curve2Ds']\n kws['curve2Ds'] = [\n [np.int32(old_curv[i, j]) for j in range(old_curv.shape[1])\n if (not np.isnan(old_curv[i, j]))]\n for i in range(old_curv.shape[0])]\n if isinstance(kws.get('surfaces', None), np.ndarray):\n old_surf = kws['surfaces']\n kws['surfaces'] = [\n {'vertex_indices': [\n {'vertex_index': np.int32(old_surf[i, j])}\n for j in range(old_surf.shape[1])\n if (not np.isnan(old_surf[i, j]))]}\n for i in range(old_surf.shape[0])]\n assert('surface_params' in in_dict)\n if isinstance(in_dict['surface_params'], np.ndarray):\n old_parm = in_dict['surface_params']\n assert(old_parm.shape == (len(kws['surfaces']), 4))\n for i in range(old_parm.shape[0]):\n kws['surfaces'][i]['starting_param_u'] = old_parm[i, 0]\n kws['surfaces'][i]['ending_param_u'] = old_parm[i, 1]\n kws['surfaces'][i]['starting_param_v'] = old_parm[i, 2]\n kws['surfaces'][i]['ending_param_v'] = old_parm[i, 3]\n if isinstance(in_dict.get('surface_texcoords', None), np.ndarray):\n old_texc = in_dict['surface_texcoords']\n assert(old_texc.shape[0] == len(kws['surfaces']))\n for i in range(old_texc.shape[0]):\n for j in range(old_texc.shape[1]):\n if not np.isnan(old_texc[i, j]):\n kws['surfaces'][i]['vertex_indices'][j][\n 'texcoord_index'] = np.int32(old_texc[i, j])\n if isinstance(in_dict.get('surface_normals', None), np.ndarray):\n old_norm = in_dict['surface_normals']\n assert(old_norm.shape[0] == len(kws['surfaces']))\n for i in range(old_norm.shape[0]):\n for j in range(old_norm.shape[1]):\n if not np.isnan(old_norm[i, j]):\n kws['surfaces'][i]['vertex_indices'][j][\n 'normal_index'] = np.int32(old_norm[i, j])\n return cls.from_dict(kws)",
"def get_data_as_arrays(compound_dict, save=False) :\n input_data = []\n output_data = []\n auxiliary_output_data = []\n for c in compound_dict.values() :\n try :\n #if c.mol_weight > 1000 :\n # continue\n [mass_peak, intensity] = list(zip(*c.spectras[0].peaks))\n if not c.to_bitvector('morgan', {'radius': 2}) :\n continue\n bitvect_array = c.bitvect_as_np_array()\n input_data.append(bitvect_array)\n output_data.append(mass_peak)\n auxiliary_output_data.append(intensity)\n except :\n continue\n input_array = np.asarray(input_data)\n output_array = np.asarray(output_data)\n auxiliary_output_array = np.asarray(auxiliary_output_data)\n print(input_array.shape)\n if save :\n np.save(\"pred_input_data\", input_array)\n np.save(\"pred_output_data\", output_array)\n np.save(\"pred_auxiliary_output_data\", auxiliary_output_array)\n \n return(input_array,\n output_array,\n auxiliary_output_array)",
"def combine_data(data_files_dict):\n key_list = list(data_files_dict.keys())\n no_col = len(data_files_dict[key_list[0]])\n combined = []\n for n in range(0, no_col):\n d = np.empty(shape=[0, 1])\n for k in data_files_dict:\n d = np.append(d, data_files_dict[k][n])\n combined.append(d)\n return combined",
"def load_dicom_volume(filename):\n # load the supplied file and get the UID of the series\n ds = pydicom.read_file(filename)\n seriesUID = ds.SeriesInstanceUID\n\n # get the position of the image\n position = numpy.array(list(map(float, ds.ImagePositionPatient)))\n\n # get the direction normal to the plane of the image\n row_vector = numpy.array(ds.ImageOrientationPatient[:3])\n col_vector = numpy.array(ds.ImageOrientationPatient[3:])\n normal_vector = numpy.cross(row_vector, col_vector)\n\n # we order slices by their distance along the normal\n def normal_distance(coords):\n return numpy.dot(normal_vector, coords)\n\n # create a dictionary to hold the slices as we load them\n slices = {normal_distance(position): ds.pixel_array}\n\n # extract the path to the folder of the file so we can look for others from the same series\n folder, _ = os.path.split(filename)\n for name in os.listdir(folder):\n if name.lower().endswith(\".ima\") or name.lower().endswith(\".dcm\"):\n new_dicom_name = os.path.join(folder, name)\n new_ds = pydicom.read_file(new_dicom_name)\n\n # check that the series UID matches\n if new_ds.SeriesInstanceUID == seriesUID:\n if new_ds.pixel_array.shape != ds.pixel_array.shape:\n continue\n new_position = list(map(float, new_ds.ImagePositionPatient))\n slices[normal_distance(new_position)] = new_ds.pixel_array\n\n # we set the overall position of the volume with the position\n # of the lowest slice\n if normal_distance(new_position) < normal_distance(position):\n position = new_position\n\n # that is all the slices in the folder, assemble them into a 3d volume\n voxel_array = numpy.zeros((len(slices),\n ds.pixel_array.shape[0],\n ds.pixel_array.shape[1]), dtype=ds.pixel_array.dtype)\n sorted_slice_positions = sorted(slices.keys())\n for i, slice_position in enumerate(sorted_slice_positions):\n voxel_array[i] = slices[slice_position]\n\n # the voxel spacing is a combination of PixelSpacing and slice separation\n voxel_spacing = list(map(float, ds.PixelSpacing))\n voxel_spacing.append(sorted_slice_positions[1] - sorted_slice_positions[0])\n\n # replace the initial slice z position with the lowest slice z position\n # position[2] = sorted_slice_positions[0]\n\n transform = transformation_matrix(row_vector,\n col_vector,\n position,\n voxel_spacing)\n\n return {\n \"voxel_spacing\": voxel_spacing,\n \"position\": position,\n \"volume\": voxel_array,\n \"vectors\": [row_vector, col_vector, normal_vector],\n \"transform\": transform\n }",
"def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]",
"def export_configmap_from_volume(volume_dict, section):\n # only export configmap in case of volume with 'File' type\n if volume_dict['pathType'] != 'File':\n return 0\n _vol = volume_dict.copy()\n _vol['section'] = section\n configmap_name = sp.get_k8s_configmap_name(_vol)\n target_filename = volume_dict['hostPath']\n\n LOG.debug(\"Exporting k8s configmap '%s'.\", configmap_name)\n cmd = [\"kubectl\", \"--kubeconfig=/etc/kubernetes/admin.conf\", \"get\",\n \"cm\", \"-n\", \"kube-system\", configmap_name, \"-o=jsonpath={.data.*}\"]\n return _export_k8s_configmap(target_filename, cmd)",
"def write_map( file_lists, target_dir, output_dir ):\n tld_to_volumes = {}\n for i, group in enumerate( file_lists ):\n for node in group:\n tld = toplevel_subdir( node, target_dir )\n tld_to_volumes.setdefault( tld, set() ).add( i )\n with open( os.path.join( output_dir, \"map.txt\" ), \"w\" ) as fout:\n for tld, volumes in tld_to_volumes.items():\n fout.write( \"{:24s}: {}\\n\".format( tld, \" \".join( [ str( x ) for x in volumes ] ) ) )",
"def _transform_spectra_data(spec_id_dict: dict):\n collection_run_id_dict = {}\n spectra_data = []\n i = 1\n for collection in spec_id_dict.keys():\n for run in spec_id_dict[collection].keys():\n collection_run_id = \"/\".join(filter(None, [collection, run]))\n if collection_run_id not in collection_run_id_dict.keys():\n collection_run_id_dict[collection_run_id] = i\n spectra_data_object = {\n \"id\": i,\n \"location\": collection_run_id,\n \"spectrum_id_format\": \"multiple peak list nativeID format\",\n # 'file_format': #TODO can we infer this?\n }\n spectra_data.append(spectra_data_object)\n return spectra_data, collection_run_id_dict",
"def padova_interpolated_isomake(directories, bands_dict, output_filename,\n bands_ordered=None):\n\n if isinstance(directories, basestring):\n directories = [directories]\n\n if bands_ordered is None:\n bands_ordered = bands_dict.values()\n\n output_obj = open(output_filename, \"w\")\n\n header_string = \"#\\t[M/H]\\tMi\\tlogAge\\tlogTe\\tlogg\\tJacobian\"\n for band in bands_ordered:\n header_string += \"\\t{}\".format(band)\n header_string += \"\\tinner_count\\touter_count\\n\"\n output_obj.write(header_string)\n\n iso_metal_dict = {}\n bands_metal_dicts = {}\n for band in bands_dict.keys():\n bands_metal_dicts[band] = {}\n\n # instead do this on band-by-band basis? *******************\n\n for direc in directories:\n iso_files_gz = gb.glob(\"{}/*.dat.gz\".format(direc.rstrip(\"/\")))\n iso_files = gb.glob(\"{}/*.dat\".format(direc.rstrip(\"/\")))\n\n # check for metallicity of each file\n # and check which bands it has\n\n for iso_file1 in iso_files_gz:\n metal = None\n iso_data = gz.open(\"{0}\".format(iso_file1))\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for iso_file1 in iso_files:\n metal = None\n iso_data = open(\"{0}\".format(iso_file1), \"r\")\n for line in iso_data:\n split_line = line.split()\n if \"[M/H]\" in split_line:\n metal = float(split_line[split_line.index(\"[M/H]\")+2])\n if \"M_ini\" in split_line:\n for band in bands_metal_dicts.keys():\n if band in split_line:\n bands_metal_dicts[band][metal] = iso_file1\n\n for metal in bands_metal_dicts[bands_metal_dicts.keys()[0]]:\n filenames = []\n for band in bands_metal_dicts:\n if metal in bands_metal_dicts[band]:\n if bands_metal_dicts[band][metal] not in filenames:\n filenames.append(bands_metal_dicts[band][metal])\n else:\n break\n else:\n iso_metal_dict[metal] = filenames\n\n print(iso_metal_dict)\n keys = iso_metal_dict.keys()\n keys.sort()\n\n if len(keys) > 2:\n # iso_metal_weights=dict(zip(keys, np.gradient(np.array(keys)) ) )\n # in numpy 1.9.0 gradient has changed to use second order behaviour\n # at boundaries which gives wrong results in this context\n iso_metal_weights = dict(zip(keys,\n replacement_gradient(np.array(keys))))\n else:\n iso_metal_weights = dict(zip(keys, np.ones(len(keys))))\n print(\"metals and weights: \", iso_metal_weights)\n\n# interp in metallicity order\n\n for key in keys:\n iso_interp(iso_metal_dict[key], key, iso_metal_weights[key],\n output_obj, bands_dict, bands_ordered)\n\n output_obj.close()",
"def borealis_array_to_dmap_files(filename, borealis_filetype, slice_id, dmap_filename):\n borealis_converter = BorealisConvert(filename, borealis_filetype,\n dmap_filename, slice_id, borealis_file_structure='array')\n\n dmap_filename = borealis_converter.sdarn_filename # overwrite to as generated\n\n bz2_filename = compress_bz2(dmap_filename) # compress (and adds .bz2 to filename)\n os.remove(dmap_filename) # remove uncompressed\n\n return bz2_filename",
"def borealis_array_to_dmap_files(filename, borealis_filetype, slice_id, dmap_filename):\n borealis_converter = BorealisConvert(filename, borealis_filetype,\n dmap_filename, slice_id, borealis_file_structure='array')\n\n dmap_filename = borealis_converter.sdarn_filename # overwrite to as generated\n\n bz2_filename = compress_bz2(dmap_filename) # compress (and adds .bz2 to filename)\n os.remove(dmap_filename) # remove uncompressed\n\n return bz2_filename",
"def _zb_dict_to_recarray(data, aliases=None):\n # if steady state is used, storage will not be written\n if \"FROM_STORAGE\" in data:\n if len(data[\"FROM_STORAGE\"]) < len(data[\"ZONE\"]):\n adj = len(data[\"ZONE\"]) - len(data[\"FROM_STORAGE\"])\n adj = [0] * adj\n data[\"FROM_STORAGE\"] = adj + data[\"FROM_STORAGE\"]\n data[\"TO_STORAGE\"] = adj + data[\"TO_STORAGE\"]\n\n zones = list(np.unique(data[\"ZONE\"]))\n zone_dtypes = []\n for zn in zones:\n if aliases is not None:\n if zn in aliases:\n zone_dtypes.append((aliases[zn], float))\n else:\n zone_dtypes.append((f\"ZONE_{int(zn)}\", float))\n else:\n zone_dtypes.append((f\"ZONE_{int(zn)}\", float))\n\n dtype = [\n (\"totim\", float),\n (\"time_step\", int),\n (\"stress_period\", int),\n (\"name\", object),\n ] + zone_dtypes\n\n if \"TOTIM\" not in data:\n dtype.pop(0)\n\n array = []\n allzones = data[\"ZONE\"]\n for strt in range(0, len(data[\"ZONE\"]), len(zones)):\n end = strt + len(zones)\n kstp = data[\"KSTP\"][strt]\n kper = data[\"KPER\"][strt]\n totim = None\n if \"TOTIM\" in data:\n totim = data[\"TOTIM\"][strt]\n\n for name, values in data.items():\n if name in (\"KSTP\", \"KPER\", \"TOTIM\", \"ZONE\"):\n continue\n rec = [kstp, kper, name]\n if totim is not None:\n rec = [totim] + rec\n tmp = values[strt:end]\n tzones = allzones[strt:end]\n # check zone numbering matches header numbering, if not re-order\n if tzones != zones:\n idx = [zones.index(z) for z in tzones]\n tmp = [tmp[i] for i in idx]\n\n array.append(tuple(rec + tmp))\n\n array = np.array(array, dtype=dtype)\n return array.view(type=np.recarray)",
"def compose_array_from_dataloader(dataloader, key=\"original\"):\n\n sample = dataloader.dataset[0][key]\n\n if key == \"label\":\n dtype = np.int\n output_shape = [len(dataloader.dataset)]\n else:\n dtype = np.float32\n output_shape = [len(dataloader.dataset)] + list(sample.shape)\n\n output_array = np.zeros(output_shape, dtype=dtype)\n output_array.setflags(write=True)\n global_batch_size = dataloader.batch_size\n\n with tqdm(total=len(dataloader)) as pbar:\n for idx, batch in enumerate(dataloader):\n array_to_add = batch[key].numpy()\n batch_size = array_to_add.shape[0]\n output_array[\n global_batch_size * idx : global_batch_size * idx + batch_size\n ] = array_to_add\n pbar.update(1)\n\n return output_array",
"def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):\n print(\"--------------------------------------------------------------------------------------\")\n print(\"Patient {:02d}: {}\".format(number, patient))\n # get all dicom image's paths\n dicom_images = [pet_folder+\"/\"+f for f in os.listdir(pet_folder) if f.lower().endswith(\".dcm\")]\n dicom_images.sort()\n # get information from dicom header\n dicom_info = dicom.read_file(dicom_images[0])\n pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))\n pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),\n float(dicom_info.SliceThickness))\n print(\" Pixel spacing: {}\".format(pixel_spacing))\n # create 3D array for pet image\n pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)\n for i, dicom_img in enumerate(dicom_images):\n ds = dicom.read_file(dicom_img)\n pet_image[:, :, i] = ds.pixel_array\n # create contours structure\n mtv_variables = []\n for struct_folder in struct_folders:\n # extract contours labels and index from lvol.txt\n lvoltxt_file = struct_folder + \"/lvol.txt\"\n with open(lvoltxt_file) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n if (\"mtv\" in line.lower() and (\"cervix\" in line.lower() or \"tumor\" in line.lower()) and\n \"nodal\" not in line.lower() and \"nodes\" not in line.lower() and\n \"ring\" not in line.lower() and \"opt\" not in line.lower()):\n struct = line.strip().split(\"|\")\n mtv_variables.append((int(struct[0]), struct[-1], struct_folder))\n # return nothing if no mtv contours were found\n if len(mtv_variables) == 0:\n return [], volumes, []\n # add contours to original image and plot it\n prev_folder = None\n patient_volumes = [pet_image]\n print(\" Possible MTV contours:\")\n for mtv_idx, mtv_label, mtv_folder in mtv_variables:\n # read and transform data from nii file\n if prev_folder != mtv_folder:\n # only read mtv_folder if it has changed\n nii_obj = nib.load(mtv_folder + \"/lvol.nii\")\n nii_data = nii_obj.get_data()\n volume = np.zeros(nii_data.shape[:3], dtype=int)\n for i in range(nii_data.shape[-1]):\n volume += nii_data[:, :, :, 0, i] << (8 * i)\n volume = np.swapaxes(volume, 0, 1)\n volume = np.flip(volume, 2)\n print(\" * Structures folder: {}\".format(mtv_folder.split(\"/\")[-1]))\n print(\" MTV_index:\", mtv_idx)\n print(\" MTV_label:\", mtv_label.split(\"/\")[-1])\n prev_folder = mtv_folder\n # create 3D matrix with 1s where ROI is and 0s everwhere else\n try:\n tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1\n except TypeError:\n print(\"Error while reading volume for index: {}, label: {}!\".format(mtv_idx,\n mtv_label))\n patient_volumes.append(())\n continue\n # find bounding box for volume\n mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]\n tumor_exists = False\n for xx in range(pixel_shape[0]):\n for yy in range(pixel_shape[1]):\n for zz in range(pixel_shape[2]):\n if tumor_volume[xx, yy, zz]:\n tumor_exists = True\n mask_range[0][0] = min(mask_range[0][0], xx)\n mask_range[0][1] = min(mask_range[0][1], yy)\n mask_range[0][2] = min(mask_range[0][2], zz)\n mask_range[1][0] = max(mask_range[1][0], xx)\n mask_range[1][1] = max(mask_range[1][1], yy)\n mask_range[1][2] = max(mask_range[1][2], zz)\n # continue if the mask is all 0s\n if not tumor_exists:\n print(\"Volume not found for index: {}, label: {}!\".format(mtv_idx, mtv_label))\n patient_volumes.append(())\n continue\n # Get ROI\n 
current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,\n mask_range[0][1]:mask_range[1][1]+1,\n mask_range[0][2]:mask_range[1][2]+1]\n current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,\n mask_range[0][1]:mask_range[1][1]+1,\n mask_range[0][2]:mask_range[1][2]+1]\n # Add volumes to patient_volumes\n patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))\n # Plot volumes\n if plot_data:\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n input(\"press ENTER to continue... \")\n plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n volumes[patient] = patient_volumes\n return mtv_variables, volumes, pixel_spacing",
"def add_array(self, indep, keys, values):\n if np.ndim(values) > 1:\n values = orient(values, keys)\n dep = {k: v for k, v in zip(keys, values)}\n self.add_dict(indep, dep)",
"def save_sample_volumes(sample, sample_dir):\n pairs = [key for key in sample.keys() if isinstance(key, tuple)]\n for pair in pairs:\n pair_dir = os.path.join(sample_dir, str(pair))\n try:\n img = sample[pair]['image']\n image_path = os.path.join(pair_dir, str(pair) + '_image.zarr')\n zarr.save(image_path, img)\n except KeyError:\n pass\n # if the label is in the sample, save this\n try:\n lab = sample[pair]['labels']\n labels_path = os.path.join(pair_dir, str(pair) + '_labels.zarr')\n zarr.save(labels_path, lab)\n except KeyError:\n pass",
"def generate_plugin_outputs_from_dict(self, results, outputs=None):\n if outputs is None:\n outputs = []\n\n dfs = []\n for key in results:\n vector = results[key]\n file_key, plugin_key = key\n logger.debug(\"Created plugin output of length %d\", len(vector))\n po = PluginOutput(vector, plugin_key, file_key)\n df = self.model.save(po)\n\n outputs.append(po)\n dfs.append(df)\n\n list_df = defer.DeferredList(dfs)\n return list_df",
"def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory",
"def load_nifty_volume_as_4d_array(filename):\n img_obj = sitk.ReadImage(filename)\n data_array = sitk.GetArrayFromImage(img_obj)\n origin = img_obj.GetOrigin()\n spacing = img_obj.GetSpacing()\n direction = img_obj.GetDirection()\n shape = data_array.shape\n if(len(shape) == 4):\n assert(shape[3] == 1) \n elif(len(shape) == 3):\n data_array = np.expand_dims(data_array, axis = 0)\n else:\n raise ValueError(\"unsupported image dim: {0:}\".format(len(shape)))\n output = {}\n output['data_array'] = data_array\n output['origin'] = origin\n output['spacing'] = (spacing[2], spacing[1], spacing[0])\n output['direction'] = direction\n return output",
"def __getitem__(self, index):\n row = self.metadata.iloc[index]\n vid_id = row.filename.split('.')[0]\n array_dict = self.dp.get_arrays(vid_id, self.num_subbursts, self.padding, self.random_start_points)\n array_dict['subbursts'] = self.apply_transform(array_dict['subbursts'])\n return array_dict",
"def calculateVolumes(data):\n print \"Calculating volumes...\"\n results = {}\n for dataLine in data:\n name = dataLine['name']\n r1 = dataLine['r1']\n r2 = dataLine['r2']\n r3 = dataLine['r3']\n r4 = dataLine['r4']\n t1 = dataLine['t1']\n t2 = dataLine['t2']\n t3 = dataLine['t3']\n volCup = (math.pi/3.0) * t1 * ((r1**2) + (r4**2) - (r1*r4))\n volPeanut = math.pi * (t1 - t2 - t3) * ((r2**2) + (r3**2) - (r2*r3)) / 3.0\n volChoc = volCup - volPeanut\n ratio = volChoc/volPeanut\n print \"Ratio for \" + name + \" is \" + str(ratio)\n results[name] = [r1, volChoc, volPeanut, volCup, ratio]\n return results",
"def load_lists(directory):\n mapping = defaultdict(lambda : [[] for x in output_keys])\n csv_paths = sorted(glob(os.path.join(directory, '*.csv')))\n for csv_pth in csv_paths:\n name = os.path.splitext(os.path.basename(csv_pth))[0]\n logging.info('Processing: %s', name)\n json_pth = os.path.splitext(csv_pth)[0] + '.json'\n with open(json_pth) as f:\n info = json.load(f)\n # Create converters\n map = info.get('mappings', {})\n converters = {}\n converters['label'] = LabelConverter(map)\n for lbl in keys[1:]:\n if lbl != 'label':\n converters[lbl] = to_float\n #\n headers = info['headers']\n mmsi_key = info['headers']['mmsi']\n try:\n with open(csv_pth, 'rU') as f:\n for line in csv.DictReader(f):\n chunks = []\n for i, key in enumerate(keys):\n # TODO: make this less hacky\n hkey = 'engine power' if (key == 'engine_power') else key\n hdr = headers.get(hkey)\n if hdr is None:\n value = None\n else:\n try:\n value = line[hdr]\n except KeyError:\n logging.fatal('could not find key ({}) in {}'.format(hdr, line.keys()))\n raise\n if key in converters:\n value = converters[key](value, key)\n chunks.append(value)\n if not chunks[0].strip():\n # empty mmsi\n print(\"Skipping\", line)\n continue\n for i in range(len(keys)):\n mapping[chunks[0]][i].append(chunks[i])\n mapping[chunks[0]][-2].append(None)\n mapping[chunks[0]][-1].append(name)\n except:\n logging.warning(\"Failed loading from: %s\", csv_pth)\n raise\n for k in mapping:\n mapping[k] = VesselRecord(*(mapping[k]))\n return mapping",
"def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}",
"def combine_batches(chosen_dict):\n\n batches = set(sorted(chosen_dict.keys())) - {'meta_data'}\n batches = sorted(list(batches))\n root_dict = dict()\n root_dict['data'] = chosen_dict[batches[0]]['data']\n root_dict['labels'] = chosen_dict[batches[0]]['labels']\n root_dict['filenames'] = chosen_dict[batches[0]]['filenames']\n root_dict['meta_data'] = chosen_dict['meta_data']\n root_dict['meta_data'].append(batches[0])\n\n for curr_batch in batches[1:]:\n temp_dict = chosen_dict[curr_batch]\n root_dict['data'] = np.concatenate((root_dict['data'],\n temp_dict['data']),\n axis=0)\n root_dict['labels'] = root_dict['labels'] + temp_dict['labels']\n root_dict['filenames'] = root_dict['filenames'] + temp_dict['filenames']\n root_dict['meta_data'].append(curr_batch)\n\n tot_rows = root_dict['data'].shape[0]\n new_order = range(tot_rows)\n for _ in range(5):\n shuffle(new_order)\n\n ub_dict = dict()\n ub_data = np.zeros((tot_rows, 3072), dtype=root_dict['data'].dtype)\n ub_labels = [0] * tot_rows\n ub_filenames = [\"\"] * tot_rows\n\n for ctr, idx in enumerate(new_order):\n ub_data[ctr, :] = root_dict['data'][idx, :]\n ub_labels[ctr] = root_dict['labels'][idx]\n ub_filenames[ctr] = root_dict['filenames'][idx]\n\n ub_dict['data'] = ub_data\n ub_dict['labels'] = ub_labels\n ub_dict['filenames'] = ub_filenames\n ub_dict['meta_data'] = root_dict['meta_data']\n\n return ub_dict",
"def get_files_or_urls_as_file_storage(file_dict, form_dict, key):\n file_values = get_possible_array_value(file_dict, key)\n url_values = get_possible_array_value(form_dict, key)\n return file_values + [url_to_file_storage(url) for url in url_values]",
"def load_chunks(self):\n for key, array in self.chunks.items():\n loaded_array = np.asarray(array)\n self.chunks[key] = loaded_array",
"def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]",
"def _partition(mapped_values):\n partitioned_data = collections.defaultdict(list)\n\n for key, value in mapped_values:\n partitioned_data[key].append(value)\n\n return partitioned_data.items()",
"def convert_dict_to_ndarray(*dictionaries):\n\n array_list = []\n\n # Loop all dicts\n for dictionary in dictionaries:\n # Loop all keys\n for key in dictionary.keys():\n # Skip non-ndarray types\n if not isinstance(dictionary[key], np.ndarray):\n continue\n # Append each item to a list\n array_list.append(dictionary[key])\n\n # Check non-uniform length between arrays\n for item in array_list:\n assert len(item) == len(array_list[0]), 'All arrays must have the same length'\n\n return np.vstack(array_list) # .swapaxes(0, 1)"
]
| [
"0.6480677",
"0.5515444",
"0.5372382",
"0.5338525",
"0.5293533",
"0.50967926",
"0.4978781",
"0.49624842",
"0.49225482",
"0.4908318",
"0.4890661",
"0.4890661",
"0.48351032",
"0.4833667",
"0.4814905",
"0.48083922",
"0.4790626",
"0.47904637",
"0.47892427",
"0.4787031",
"0.47399876",
"0.47160214",
"0.46976194",
"0.46932828",
"0.46916473",
"0.4685613",
"0.46691766",
"0.46595308",
"0.4648917",
"0.464114"
]
| 0.7390135 | 0 |
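The `clean_arrays_dict` document in the row above calls `convert_Volume_to_slices`, which is not defined in the row itself. Below is a minimal runnable sketch of how it might fit together, assuming a simple `Volume` box type and a per-axis slice conversion (both are assumptions made here for illustration, not the original project's definitions).

class Volume:
    """Axis-aligned box described by a low corner p1 and a high corner p2 (assumed API)."""
    def __init__(self, index, p1, p2):
        self.index = index
        self.p1 = tuple(p1)
        self.p2 = tuple(p2)

    def get_corners(self):
        return self.p1, self.p2


def convert_Volume_to_slices(v):
    # One slice per axis spanning the volume's extent (assumed behaviour).
    return [slice(lo, hi) for lo, hi in zip(v.p1, v.p2)]


def clean_arrays_dict(arrays_dict):
    # Replace each list of Volumes by a list of per-axis slice lists,
    # mirroring the document in the row above.
    for k in arrays_dict.keys():
        arrays_dict[k] = [convert_Volume_to_slices(v) for v in arrays_dict[k]]


arrays_dict = {0: [Volume(None, (0, 0, 0), (2, 2, 2))]}
clean_arrays_dict(arrays_dict)
print(arrays_dict[0])  # [[slice(0, 2, None), slice(0, 2, None), slice(0, 2, None)]]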
Merge a volume with the other volumes from the volumes list along the given merge directions. | def apply_merge(volume, volumes, merge_directions):
def get_new_volume(volume, lowcorner):
v2 = get_volume(lowcorner)
if v2 != None:
return merge_volumes(volume, v2)
else:
return volume
def get_volume(lowcorner):
if not isinstance(lowcorner, tuple):
raise TypeError() # required for "=="
for i in range(len(volumes)):
v = volumes[i]
if v.p1 == lowcorner:
logger.debug("\tMerging volume with low corner %s", v.p1)
return volumes.pop(i)
logger.warning("\tNo volume to merge with")
return None
import copy
logger.debug("\t== Function == apply_merge")
p1, p2 = volume.get_corners()
logger.debug("\tTargetting volume with low corner %s", p1)
if len(merge_directions) == 1:
if Axes.k in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.k.value] = p2[Axes.k.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif Axes.j in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.j.value] = p2[Axes.j.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif Axes.i in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.i.value] = p2[Axes.i.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif len(merge_directions) == 2:
logger.debug("\tMerge directions: %s", merge_directions)
axis1, axis2 = merge_directions
p1_target = list(copy.deepcopy(p1))
p1_target[axis1.value] = p2[axis1.value]
volume_axis1 = get_new_volume(volume, tuple(p1_target))
new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])
new_volume_axis2 = apply_merge(volume, volumes, [axis2])
new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)
elif len(merge_directions) == 3:
logger.debug("\tMerge directions %s", merge_directions)
axis1, axis2, axis3 = merge_directions
p1_target = list(copy.deepcopy(p1))
p1_target[axis1.value] = p2[axis1.value]
volume_axis1 = get_new_volume(volume, tuple(p1_target))
new_vol1 = apply_merge(volume, volumes, [axis2, axis3])
new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])
new_volume = merge_volumes(new_vol1, new_vol2)
else:
raise ValueError()
logger.debug("\tEnd")
return new_volume | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_volumes(volume1, volume2):\n if not isinstance(volume1, Volume) or \\\n not isinstance(volume2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = volume1.get_corners()\n lowercorner2, uppercorner2 = volume2.get_corners()\n lowercorner = (min(lowercorner1[0], lowercorner2[0]), \n min(lowercorner1[1], lowercorner2[1]),\n min(lowercorner1[2], lowercorner2[2]))\n uppercorner = (max(uppercorner1[0], uppercorner2[0]), \n max(uppercorner1[1], uppercorner2[1]),\n max(uppercorner1[2], uppercorner2[2]))\n return Volume(None, lowercorner, uppercorner)",
"def imprint_merge_each_group():\r\n \r\n G = cubit.get_entities(\"group\")\r\n for gid in G:\r\n vid = cubit.get_group_volumes(gid)\r\n if len(vid)>1:\r\n cubit.cmd(f\"imprint vol {list_to_str(vid)}\")\r\n cubit.cmd(f\"merge vol {list_to_str(vid)}\")",
"def merge_snapshot(self):\n disks = self.get_disks()\n disk_files_tree = []\n for disk in disks:\n disk_files_tree += (DiskImageHelper.get_backing_files_tree(disk.file))\n merge_snapshot_cmd = \"virsh blockpull --domain {domain_name} {disk_path} --wait\".format(\n domain_name=self.name, disk_path=disk.file)\n\n logging.debug(\"Executing: '%s'\" % merge_snapshot_cmd)\n logging.info(\"Merging base to new snapshot for '%s' device\" % disk.device)\n\n # launch command\n merge_snapshot_cmds = shlex.split(merge_snapshot_cmd)\n merge_snapshot = subprocess.Popen(merge_snapshot_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=False)\n\n # wait to terminate\n status = merge_snapshot.wait()\n\n if status != 0:\n logging.error(\"Error for '%s': %s\" % (merge_snapshot_cmd, merge_snapshot.stderr.read()))\n logging.critical(\"{exe} returned {status} state\".format(exe=merge_snapshot_cmds[0], status=status))\n raise Exception(\"blockpull didn't work properly\")\n\n current_disk_files = [disk.file for disk in self.get_disks()]\n\n # remove old disk device files without current ones\n for file in [disk_file_tree for disk_file_tree in disk_files_tree if disk_file_tree not in current_disk_files]:\n logging.info(\"Removing old disk file: '%s'\" % file)\n os.remove(file)",
"def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)",
"def volumes(self):",
"def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)",
"def reconstruct(self):\n volumes = list(sorted((v for v in self.get_volumes() if v.mountpoint and v.lastmountpoint),\n key=lambda v: v.mountpoint or \"\", reverse=True))\n\n try:\n root = list(filter(lambda x: x.lastmountpoint == '/', volumes))[0]\n except IndexError:\n self._debug(\"[-] Could not find / while reconstructing, aborting!\")\n return None\n\n volumes.remove(root)\n\n for v in volumes:\n v.bindmount(os.path.join(root.mountpoint, v.lastmountpoint[1:]))\n return root",
"def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only",
"def sync(self, **kwargs):\n volume_1 = kwargs['NAMES'][0]\n volume_2 = kwargs['NAMES'][1]\n path1 = f\"{self.cm.find_name(name=volume_1)[0]['path']}/{volume_1}/\"\n path2 = f\"{self.cm.find_name(name=volume_2)[0]['path']}/{volume_2}/\"\n os.system(f\"rsync -avzh {path2} {path1}\")\n kwargs1 = {'NAME': volume_1, 'key': \"sync_with\", 'value': volume_2}\n volume_info1 = self.add_tag(**kwargs1)\n result = [volume_info1]\n return result",
"def detach_all_volumes(self):\n nova_connection = self.source_connection.get_nova_connection(self.source_region_name)\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(self.resource_manager.detach_all_volume(nova_connection))\n except:\n raise",
"def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)",
"def get_merged_variants(self, variants, key):\n # type: (List[vcfio.Variant], str) -> List[vcfio.Variant]\n raise NotImplementedError",
"def AddVolumes(self, volumeIDList):\n volume_ids = self.volumes\n for vol_id in volumeIDList:\n if vol_id not in volume_ids:\n volume_ids.append(vol_id)\n else:\n mylog.debug(\"volumeID \" + str(vol_id) + \" is already in group\")\n\n # Add the requested volumes\n params = {}\n params[\"volumes\"] = volume_ids\n params[\"volumeAccessGroupID\"] = self.ID\n libsf.CallApiMethod(self.mvip, self.username, self.password, \"ModifyVolumeAccessGroup\", params, ApiVersion=5.0)",
"def mount_multiple_volumes(self):\n\n for disk in self.disks:\n self._debug(\" Mounting volumes in {0}\".format(disk))\n for volume in disk.mount_multiple_volumes():\n yield volume",
"def setupVolumes(volumes: Volumes) -> None:\n volumesList = readProcessJson(\n [\"podman\", \"volume\", \"ls\", \"--format\", \"json\"])\n existingVolumes: Set[str] = set()\n if volumesList:\n for volume in volumesList:\n existingVolumes.add(volume['name'])\n for volume in volumes.values():\n if volume.name not in existingVolumes:\n log.info(f\"Creating volume {volume.name}\")\n execute([\"podman\", \"volume\", \"create\", volume.name])\n if volume.files:\n for file in volume.files:\n path = Path(\"~/.local/share/containers/storage/volumes/\"\n f\"{volume.name}/_data/{file.name}\").expanduser()\n if not path.exists():\n log.info(f\"Writting {path}\")\n path.write_text(file.content)",
"def add_volume(self, volume_block, apfs_tree):\n\n # get volume superblock\n block = self.read_block(volume_block)\n block_map = block.body.block_map_block # mapping btree\n root_dir_id = block.body.root_dir_id # root dir id\n if self.verbose:\n vol_desc = \"%s (volume, Mapping-Btree: %d, Rootdir-ID: %d\" % (\n block.body.name, block_map, root_dir_id)\n else:\n vol_desc = block.body.name\n\n # get volume btree\n block = self.read_block(block_map)\n\n # get root btree node and parse it with all its children, collecting dir entries\n block = self.read_block(block.body.root)\n entries = self.get_entries(block)\n\n # create a tree from the found dir entries\n vol_node = Node(vol_desc, apfs_tree)\n self.list_children(1, entries, vol_node)",
"def attach_volumes(instance_id, volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'nova volume-attach %s %s %s' % (instance_id, volume['id'],\n volume['device'])\n dest_attachment = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])",
"def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)",
"def _attach_volume(self):\n return []",
"def merge_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n if sys.platform == 'win32':\n videocluster = dirname + '/' + name\n listwaves = os.listdir(videocluster)\n listwaves.sort()\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n #file_basename = os.path.join(dirname, name)\n if sys.platform == 'win32':\n listw = [videocluster + '/' + fil for fil in listwaves] \n # file_basename = dirname + '/' + name\n self.wave = os.path.join(dirname, name + \".wav\")\n if sys.platform == 'win32':\n self.wave = dirname + '/' + name + \".wav\"\n fm.merge_waves(listw, self.wave)",
"def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)",
"def mount_volumes(self, single=None):\n\n for disk in self.disks:\n self._debug(\" Mounting volumes in {0}\".format(disk))\n for volume in disk.mount_volumes(single):\n yield volume",
"def merge(): #Status: WIP\r\n pass",
"def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items",
"def bootable_volume(volumes):\n for volume in volumes:\n if '/dev/vda' in volume['attachments']:\n return volume",
"def self_merge(self, source_id, destination_id):\n self.vectors[destination_id].merge(self.vectors[source_id])",
"def attach(self,\n names,\n vm):\n results = []\n for name in names:\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if vm in vms:\n Console.error(f\"{name} already attached to {vm}\")\n else:\n result = self.mount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" in mounts.keys():\n vms.append(vm)\n\n result = self.update_volume_after_attached_to_vm(\n info=volume_info, vms=vms)\n results.append(result)\n else:\n Console.error(\n \"volume is not existed or volume had been deleted\")\n return results[0]",
"def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()",
"def _merge(self):\n raise NotImplementedError",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()"
]
| [
"0.6615679",
"0.5793308",
"0.5730063",
"0.5686665",
"0.56246465",
"0.5593841",
"0.55670226",
"0.55485183",
"0.5492772",
"0.5480072",
"0.5442755",
"0.5396768",
"0.530223",
"0.5294843",
"0.527612",
"0.5255526",
"0.52393526",
"0.5207724",
"0.5206096",
"0.5203979",
"0.5188928",
"0.5137585",
"0.50879145",
"0.5080211",
"0.5049495",
"0.5030539",
"0.50200117",
"0.50175244",
"0.5012345",
"0.5007279"
]
| 0.6634114 | 0 |
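`apply_merge` in the row above relies on an `Axes` enum and a `Volume`/`merge_volumes` pair defined elsewhere in its project. The short sketch below isolates the one step that drives the recursion — computing the low corner of the neighbouring block along a given axis, the way `p1_target` is built above — under an assumed axis numbering (i=0, j=1, k=2); it is an illustration, not the original code.

from enum import Enum

class Axes(Enum):
    i = 0
    j = 1
    k = 2

def neighbour_low_corner(p1, p2, axis):
    # Replace one coordinate of the low corner with the high corner's
    # coordinate: that is where the adjacent block along `axis` must
    # start if it is present in the volumes list.
    target = list(p1)
    target[axis.value] = p2[axis.value]
    return tuple(target)

print(neighbour_low_corner((0, 0, 0), (2, 2, 2), Axes.k))  # (0, 0, 2)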
Subscribes the decorated function to all messages from the message queue. | def subscribe():
def func_wrapper(func):
queue.observe_on(scheduler).subscribe(func)
return func
return func_wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_messages_from_queue(fx):\n\n for msg in queue.receive_messages():\n fx(msg)",
"def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()",
"def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)",
"def _consume(self):\n # HACK: run_in_executor is used as a workaround to use boto\n # inside a coroutine. This is a stopgap solution that should be\n # replaced once boto has support for asyncio or aiobotocore has\n # a stable release.\n loop = asyncio.get_event_loop()\n receive_message = partial(\n self.client.receive_message,\n QueueUrl=self.app.settings['SQS_INBOUND_QUEUE_URL'],\n AttributeNames=self.app.settings['SQS_ATTRIBUTE_NAMES'],\n MessageAttributeNames=self.app.settings['SQS_MESSAGE_ATTRIBUTES'],\n MaxNumberOfMessages=self.app.settings['SQS_MESSAGE_BATCH_SIZE'],\n VisibilityTimeout=self.app.settings['SQS_VISIBILITY_TIMEOUT'],\n WaitTimeSeconds=self.app.settings['SQS_WAIT_TIME'],\n )\n while True:\n future = loop.run_in_executor(None, receive_message)\n messages = yield from future\n for message in messages.get('Messages', []):\n message['Body'] = json.loads(message['Body'])\n yield from self._message_queue.put(message)",
"def dispatch(self, event: str, message: str) -> None:\n\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\tcallback(event, message)",
"def decorator(func):\n self.subscribe(func, event, *events)\n return func",
"def consume_messages(process_func: Callable[[str], None]):\n consumer = get_consumer()\n\n for message in consumer:\n log.debug(f'Received a message: {message}')\n try:\n process_func(message.value)\n except Exception as e:\n log.error(f'Failed to process a message: {message.value}')\n log.exception(e)",
"def subscribeConsumer(consumer):",
"def subscribe(self):\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(self.message_channel)\n for item in pubsub.listen():\n if item.get(\"data\") not in (1, None):\n yield item",
"def queue(self, func, *args, **kwargs):\n return self.event_queue.put((func, args, kwargs))",
"def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()",
"def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)",
"def message_subscribers(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr_message_subscribers(self, *args, **kwargs)",
"def subscribe(self, queue, action=None):\n if action:\n self.broker.subscribe(queue, action)\n else:\n self.broker.subscribe(queue)",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])",
"def _messages_list(self, queue):\n\n return queue.messages()",
"def on_message(self, channel_id, message):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s, message: %s', channel_id, message)\n\n clients = None\n for key in self.clients.iterkeys():\n # redis 仅支持 glob-style 的正则\n if fnmatch.fnmatchcase(channel_id, key):\n clients = self.clients.get(key, None)\n break\n\n if clients is None:\n return\n\n bad_clients = []\n for client in clients:\n if client.is_alive():\n client.on_sub_notification(channel_id, message)\n else:\n bad_clients.append(client)\n\n for client in bad_clients:\n clients.remove(client)\n\n if not clients:\n del self.clients[channel_id]\n self.subscriber.punsubscribe(channel_id)",
"def subscribe(receiver, catchup):",
"def message_subscribers(self, *args, **kwargs):\n return _TestA_swig.cleanslate_sptr_message_subscribers(self, *args, **kwargs)",
"def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)",
"def subscribe(self, f=None, priority: int = 0):\n\n def decorator(decorated_function):\n self._subscriptions[self.id].append(\n Subscription(\n priority=priority,\n subscription_list=self,\n subscriber=decorated_function,\n )\n )\n self._subscriptions[self.id].sort()\n return decorated_function\n\n if f is None:\n return decorator\n\n return decorator(f)",
"def notify(self) -> None:\n for s in self.subscribers:\n s()",
"def enqueue(self, func):\n self.queue.put(func)",
"def subscribe(self):\n with self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()",
"def subscribe_to(type_to_subscribe_to=None):\n def func_wrapper(func):\n queue.filter(lambda x:isinstance(x, type_to_subscribe_to) or type_to_subscribe_to == None).observe_on(scheduler).subscribe(func)\n return func\n return func_wrapper",
"def event_queue_proc(self,event):\r\n event()",
"def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))"
]
| [
"0.67206204",
"0.61992186",
"0.6189017",
"0.6103482",
"0.6008804",
"0.5993647",
"0.59808356",
"0.59557223",
"0.59217",
"0.5876236",
"0.586259",
"0.5857057",
"0.58430415",
"0.5837712",
"0.57736427",
"0.57736427",
"0.57736427",
"0.566076",
"0.56512743",
"0.5609942",
"0.5599494",
"0.5565641",
"0.55533034",
"0.55407643",
"0.55288565",
"0.5524078",
"0.54972947",
"0.54677105",
"0.54453486",
"0.54413974"
]
| 0.71473676 | 0 |
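The `subscribe` decorator in the row above assumes an Rx-style observable named `queue` and a `scheduler` created elsewhere in its module. A dependency-free sketch of the same idea — a plain worker thread draining a standard-library `queue.Queue` — is given below; it is an approximation of the pattern, not the original reactive wiring.

import queue
import threading
import time

_messages = queue.Queue()
_subscribers = []

def subscribe():
    # Register the decorated function as a consumer of every message.
    def func_wrapper(func):
        _subscribers.append(func)
        return func
    return func_wrapper

def _dispatch_loop():
    while True:
        msg = _messages.get()
        for func in _subscribers:
            func(msg)

threading.Thread(target=_dispatch_loop, daemon=True).start()

@subscribe()
def log_message(msg):
    print("got:", msg)

_messages.put("hello")
time.sleep(0.1)  # give the daemon dispatcher a moment to deliver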
Subscribes the decorated function to messages of the given type. The function will be observed on its own thread. | def subscribe_to(type_to_subscribe_to=None):
def func_wrapper(func):
queue.filter(lambda x:isinstance(x, type_to_subscribe_to) or type_to_subscribe_to == None).observe_on(scheduler).subscribe(func)
return func
return func_wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subscribe():\n def func_wrapper(func):\n queue.observe_on(scheduler).subscribe(func)\n return func\n return func_wrapper",
"def decorator(func):\n self.subscribe(func, event, *events)\n return func",
"def listen(self, event_type):\n def decorator(func):\n if func not in self.event_subscribers[event_type]:\n kwargs = {event_type: func}\n self.bind(**kwargs)\n self.event_subscribers[event_type].add(func)\n return func\n return decorator",
"def subscribe(self, event_type, func):\n if func not in self.event_subscribers[event_type]:\n kwargs = {event_type: func}\n self.bind(**kwargs)\n self.event_subscribers[event_type].add(func)",
"def subscribe(self, event_type: typing.Type[typing.Any], callback: CallbackT[typing.Any]) -> None:",
"def listen(\n self,\n event_type: typing.Optional[typing.Type[EventT_co]] = None,\n ) -> typing.Callable[[CallbackT[EventT_co]], CallbackT[EventT_co]]:",
"def subscribe_decorator(topic, **kwargs):\n\n def _subscribe_decorator(func):\n setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))\n # no @wraps\n return func\n\n return _subscribe_decorator",
"def subscribe(self, meta_type, callback, can_nack=False):\n if not can_nack:\n subscriber_list = self.subscribers.setdefault(meta_type, [])\n else:\n subscriber_list = self.nackables.setdefault(meta_type, [])\n subscriber_list.append(callback)",
"def listen(eventType):\n def _decoration(fcn):\n fcn.listen = True\n fcn.eventType = eventType\n return fcn\n return _decoration",
"def handler(self, msg_type: str, unpack: bool = False):\n\n assert msg_type in MESSAGE_TYPES\n\n def wrapper(fn: Callable[..., None]) -> None:\n self._handlers[msg_type] = HandlerCallback(fn, unpack=unpack)\n\n return wrapper",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def listen_for_message(msg_type, handler, bus=None):\n bus = bus or get_mycroft_bus()\n bus.on(msg_type, handler)\n return bus",
"def notify_subscribers(self, event_type, *event_args, **event_kwargs):\n subscribers = self.get_all(event_type, default=())\n if subscribers:\n subscribers = sorted(subscribers, key=Subscriber.sorter)\n event = event_type(*event_args, **event_kwargs)\n for subscriber in subscribers:\n subscriber.func(event, **subscriber.args)\n if subscriber.once:\n self.remove(event_type, subscriber)",
"def subscribe(receiver, catchup):",
"def register_msgtype_callback(self, path, msg_type, callback_func):",
"def fire(self, event_type: str, event=None) -> None:\n for listener in self.event_listeners.get(event_type, []):\n if asyncio.iscoroutinefunction(listener):\n asyncio.run_coroutine_threadsafe(\n listener(event),\n asyncio.get_running_loop()\n )\n else:\n listener(event)",
"def on(self, signal_type, func=None):\n if func is None:\n def wrapper(func):\n self.on(signal_type, func)\n return func\n return wrapper\n\n on_signal(self, signal_type, func)\n return func",
"def event(self, event_type):\n\n def _decorator(callback):\n return self.bind_event(event_type, callback)\n\n return _decorator",
"def subscribe(self, subscription_type, callback):\n if subscription_type in self._subscriptions.keys():\n self._subscriptions[subscription_type].append(callback)",
"def _trigger_subscriptions(self, subscription_type, *args, **kwargs):\n if subscription_type in self._subscriptions.keys():\n # Run every subscribed callback\n for callback in self._subscriptions[subscription_type]:\n try:\n callback(*args, **kwargs)\n except AttributeError:\n continue",
"def apply_handler(self):\n tmp = self.event_type\n if hasattr(self, tmp):\n getattr(self, tmp)()\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)",
"def add_subscriber(self, event_type, func, priority=None, once=False,\n **args):\n event_type = load_object(event_type)\n func = load_object(func)\n subscriber = Subscriber(event_type, func, priority, once, args)\n self.register(event_type, subscriber, subscriber)",
"def listen(self, event: Event, *events: Event) -> Callable:\n def decorator(func):\n \"\"\"Subscribe function to events.\"\"\"\n self.subscribe(func, event, *events)\n return func\n return decorator",
"def on(\n self,\n type_: str | type[Event],\n *,\n subtype: str | None = None,\n ) -> DECORATOR_TYPE:\n\n event_type = type_ if isinstance(type_, str) else type_.type\n\n def decorator(target: DECORATOR_ARGS_TYPE) -> Handler:\n handler = get_handler(target)\n\n self.apps.append(\n App(\n event_type,\n subtype,\n handler,\n ),\n )\n\n return handler\n\n return decorator",
"def register(event_type):\n def decorator(cls):\n event_registry[event_type] = cls\n return cls\n return decorator",
"def subscribe(self, f=None, priority: int = 0):\n\n def decorator(decorated_function):\n self._subscriptions[self.id].append(\n Subscription(\n priority=priority,\n subscription_list=self,\n subscriber=decorated_function,\n )\n )\n self._subscriptions[self.id].sort()\n return decorated_function\n\n if f is None:\n return decorator\n\n return decorator(f)",
"def notify_decorator(name, fn):\n return fn",
"def subscribe(receiver, updateInterval=None):"
]
| [
"0.62448895",
"0.62394124",
"0.61755747",
"0.6086829",
"0.5917949",
"0.59065235",
"0.55706877",
"0.55671126",
"0.5484962",
"0.5458996",
"0.5413952",
"0.5413952",
"0.5413952",
"0.5380395",
"0.53653276",
"0.5363469",
"0.53156483",
"0.5297839",
"0.5297297",
"0.5295942",
"0.52826035",
"0.5282094",
"0.52195233",
"0.520765",
"0.5143888",
"0.5136541",
"0.50728256",
"0.50698835",
"0.501446",
"0.49660987"
]
| 0.7049433 | 0 |
Clause server; Statement edns_udp_size; passing mode | def test_isc_server_stmt_edns_udp_size_passing(self):
test_string = [
'edns-udp-size 0;',
'edns-udp-size 1;',
'edns-udp-size 102;',
'edns-udp-size 255;',
]
result = optviewserver_stmt_edns_udp_size.runTests(test_string, failureTests=False)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_server_stmt_edns_udp_size_failing(self):\n test_string = [\n 'edns-udp-size yes;',\n 'edns-udp-size -3;',\n ]\n result = optviewserver_stmt_edns_udp_size.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def SendPacketsSendSize(self) -> int:",
"def recv_size(s, size):\n print 'Receive data in fixed size mode'\n reply = s.recv(size)\n print reply",
"def _get_message_segment_size (options ) :\n options_list = dpkt.tcp.parse_opts ( options )\n for option in options_list :\n if option[0] == 2 :\n# The MSS is a 16 bit number. Look at RFC 793 http://www.rfc-editor.org/rfc/rfc793.txt page 17. dpkt decodes it as a 16\n# bit number. An MSS is never going to be bigger than 65496 bytes.\n# The most common value is 1460 bytes (IPv4) which 0x05b4 or 1440 bytes (IPv6) which is 0x05a0.\n mss = struct.unpack(\">H\", option[1])\n return mss",
"def payload_size(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_payload_size(self)",
"def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def payload_size(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_payload_size(self)",
"def network_byte_length(self) -> int:",
"def cmd_size(args):",
"def get_udp_packet(self, sock, size=0):\n\n pkt = ''\n while True:\n buf = ''\n try:\n buf = sock.recvfrom(64)[0]\n except socket.timeout:\n break\n if size and len(pkt) >= size:\n break\n if not buf:\n break\n pkt += buf\n return pkt",
"def redis_size(self):\n def func(server):\n return server.size()\n self.__run_redis_cmd(func, dbs=range(0,8))",
"def OnESPacket(current_pid, packet, header_size):\n pass",
"def set_size(self, mode):\n return len(self.data_index[mode])",
"def SendBufferSize(self) -> int:",
"def SendBufferSize(self) -> int:",
"def test_build_command_mode_udp(self):\n actual_result = IperfServerCommandBuilder()\\\n .set_mode_udp(IPERF_MODE)\\\n .build_server_command()\n self.assertListEqual(actual_result, ['iperf', '-s', '-u'])",
"def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20",
"def DLEN(self):",
"def run_dbsize(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tself.htmlmessage(self.__context.Control_Panel.db_size(), printable=1)",
"def __len__(self):\n # Header + group id + session timeout\n size = self.HEADER_LEN + 2 + len(self.group_id) + 4\n # + member id + protocol type + len(group protocols)\n size += 2 + len(self.member_id) + 2 + len(self.protocol_type) + 4\n # metadata tuples\n for name, metadata in self.group_protocols:\n size += 2 + len(name) + 4 + len(metadata)\n return size",
"def __payload_size(self):\n return (\n self.SIZE_LINEUP_ID + self.players_per_lineup * self.SIZE_PLAYER) * self.entries.count()",
"def test_size():\n assert Packet106.size == 12",
"def udp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 300\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})",
"def test_pos_list_size_with_correct_paramters(self):\n key = ('test', 'demo', 1)\n\n count = self.as_connection.list_size(key, 'contact_no')\n\n assert 2 == count",
"def ReceiveBufferSize(self) -> int:",
"def ReceiveBufferSize(self) -> int:",
"def get_size(self, valueid):",
"def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")",
"def getPacketCount(self):\n return 1",
"def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count"
]
| [
"0.7094463",
"0.650804",
"0.6228553",
"0.5829782",
"0.5800739",
"0.5688185",
"0.56580013",
"0.5654203",
"0.56267405",
"0.5549835",
"0.5502808",
"0.5468445",
"0.5465288",
"0.5457814",
"0.5457814",
"0.54485154",
"0.53859425",
"0.53797966",
"0.5371547",
"0.53284526",
"0.532783",
"0.5325473",
"0.53198856",
"0.5303512",
"0.5295812",
"0.5295812",
"0.5270942",
"0.5270665",
"0.5270267",
"0.5221276"
]
| 0.7638634 | 0 |
Clause server; Statement edns_udp_size; failing mode | def test_isc_server_stmt_edns_udp_size_failing(self):
test_string = [
'edns-udp-size yes;',
'edns-udp-size -3;',
]
result = optviewserver_stmt_edns_udp_size.runTests(test_string, failureTests=True)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_server_stmt_edns_udp_size_passing(self):\n test_string = [\n 'edns-udp-size 0;',\n 'edns-udp-size 1;',\n 'edns-udp-size 102;',\n 'edns-udp-size 255;',\n ]\n result = optviewserver_stmt_edns_udp_size.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])",
"def SendPacketsSendSize(self) -> int:",
"def test_udp_bad_server():\n assert dnsck_query(\"8.8.8.88\", \"google.com\", \"A\", 1) == 1",
"def recv_size(s, size):\n print 'Receive data in fixed size mode'\n reply = s.recv(size)\n print reply",
"def test_invalid_packet_size(self):\n p = (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.src_if.local_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw()\n )\n self.extend_packet(p, 1000, self.padding)\n fragments = fragment_rfc8200(p, 1, 500)\n bad_fragment = fragments[1]\n bad_fragment[IPv6ExtHdrFragment].offset = 65500\n self.pg_enable_capture()\n self.src_if.add_stream([bad_fragment])\n self.pg_start()\n pkts = self.src_if.get_capture(expected_count=1)\n icmp = pkts[0]\n self.assertIn(ICMPv6ParamProblem, icmp)\n self.assert_equal(icmp[ICMPv6ParamProblem].code, 0, \"ICMP code\")",
"def test_size():\n assert Packet106.size == 12",
"def _get_message_segment_size (options ) :\n options_list = dpkt.tcp.parse_opts ( options )\n for option in options_list :\n if option[0] == 2 :\n# The MSS is a 16 bit number. Look at RFC 793 http://www.rfc-editor.org/rfc/rfc793.txt page 17. dpkt decodes it as a 16\n# bit number. An MSS is never going to be bigger than 65496 bytes.\n# The most common value is 1460 bytes (IPv4) which 0x05b4 or 1440 bytes (IPv6) which is 0x05a0.\n mss = struct.unpack(\">H\", option[1])\n return mss",
"def test_udp_query():\n assert dnsck_query(\"8.8.8.8\", \"google.com\", \"a\", 1) == 0",
"def network_byte_length(self) -> int:",
"def payload_size(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_payload_size(self)",
"def test_size():\n assert Packet40.size == 2",
"def test_udp_alt_rectype():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"txt\",\n \"-i\",\n \"1\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0",
"def test_udp_no_records():\n assert dnsck_query(\"8.8.8.8\", \"test.google.com\", \"A\", 1) == 0",
"def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def get_udp_packet(self, sock, size=0):\n\n pkt = ''\n while True:\n buf = ''\n try:\n buf = sock.recvfrom(64)[0]\n except socket.timeout:\n break\n if size and len(pkt) >= size:\n break\n if not buf:\n break\n pkt += buf\n return pkt",
"def run_dbsize(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tself.htmlmessage(self.__context.Control_Panel.db_size(), printable=1)",
"def udp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 300\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})",
"def test_size():\n assert Packet20.size == 2",
"def test_udp_alt_rectype_and_iteration():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"soa\",\n \"-i\",\n \"2\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0",
"def payload_size(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_payload_size(self)",
"def test_invalid_frag_size(self):\n p = (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.src_if.local_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw()\n )\n self.extend_packet(p, 1000, self.padding)\n fragments = fragment_rfc8200(p, 1, 500)\n bad_fragment = fragments[0]\n self.extend_packet(bad_fragment, len(bad_fragment) + 5)\n self.pg_enable_capture()\n self.src_if.add_stream([bad_fragment])\n self.pg_start()\n pkts = self.src_if.get_capture(expected_count=1)\n icmp = pkts[0]\n self.assertIn(ICMPv6ParamProblem, icmp)\n self.assert_equal(icmp[ICMPv6ParamProblem].code, 0, \"ICMP code\")",
"def test_largedata(self):\n cur = self.connect().cursor()\n cur.execute(\"SELECT @@max_allowed_packet\")\n if cur.fetchone()[0] < 16 * 1024 * 1024 + 10:\n print(\"Set max_allowed_packet to bigger than 17MB\")\n return\n t = \"a\" * (16 * 1024 * 1024)\n cur.execute(\"SELECT '\" + t + \"'\")\n assert cur.fetchone()[0] == t",
"def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count",
"def __len__(self):\n # Header + group id + session timeout\n size = self.HEADER_LEN + 2 + len(self.group_id) + 4\n # + member id + protocol type + len(group protocols)\n size += 2 + len(self.member_id) + 2 + len(self.protocol_type) + 4\n # metadata tuples\n for name, metadata in self.group_protocols:\n size += 2 + len(name) + 4 + len(metadata)\n return size",
"def getPacketCount(self):\n return 1",
"def test_pos_list_size_list_with_correct_policy(self):\n key = ('test', 'demo', 2)\n policy = {\n 'timeout': 1000,\n 'retry': aerospike.POLICY_RETRY_ONCE,\n 'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER\n }\n count = self.as_connection.list_size(key, \"contact_no\", {}, policy)\n\n assert 2 == count",
"def test_pos_list_size_with_correct_paramters(self):\n key = ('test', 'demo', 1)\n\n count = self.as_connection.list_size(key, 'contact_no')\n\n assert 2 == count",
"def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def DLEN(self):",
"def test_op_no_query_mtu(self):\n assert OP_NO_QUERY_MTU == 0x1000"
]
| [
"0.79074216",
"0.64085585",
"0.587062",
"0.5830589",
"0.58152884",
"0.57875025",
"0.5759887",
"0.5730117",
"0.5629439",
"0.56094414",
"0.5598666",
"0.5575006",
"0.5564831",
"0.5560443",
"0.5555972",
"0.5522362",
"0.5521356",
"0.54879683",
"0.5479655",
"0.54627013",
"0.5443418",
"0.54372966",
"0.5399723",
"0.5388312",
"0.5369748",
"0.5348308",
"0.5340303",
"0.53386897",
"0.5304074",
"0.5263798"
]
| 0.7922531 | 0 |
Clause Options/View/Server; Statement provideixfr; passing mode | def test_isc_optviewserver_stmt_provide_ixfr_passing(self):
test_string = [
'provide-ixfr yes;',
'provide-ixfr 1;',
'provide-ixfr 0;',
'provide-ixfr no;',
'provide-ixfr True;',
'provide-ixfr False;',
]
result = optviewserver_stmt_provide_ixfr.runTests(test_string, failureTests=False)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_server_stmt_request_ixfr_passing(self):\n test_string = [\n 'request-ixfr yes;',\n 'request-ixfr 1;',\n 'request-ixfr 0;',\n 'request-ixfr no;',\n 'request-ixfr True;',\n 'request-ixfr False;',\n ]\n result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])",
"def test_isc_optviewserver_stmt_provide_ixfr_failing(self):\n test_string = [\n 'provide-ixfr Y'\n ]\n result = optviewserver_stmt_provide_ixfr.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def generic_db_query(db_obj, db_cur, mode, scope, tables, key_cv, value_cv,\n where_str=None, where_args=[], more_str=None,\n more_args=[]):\n\n if mode not in ['read', 'update', 'insert', 'delete']:\n nori.core.email_logger.error(\n'''Internal Error: invalid mode supplied in call to generic_db_query();\ncall was (in expanded notation):\n\ngeneric_db_query(\n db_obj={0},\n db_cur={1},\n mode={2},\n scope={3},\n tables={4},\n key_cv={5},\n value_cv={6},\n where_str={7},\n where_args={8},\n more_str={9},\n more_args={10}\n)\n\nExiting.'''.format(*map(nori.pps, [db_obj, db_cur, mode, scope, tables,\n key_cv, value_cv, where_str, where_args,\n more_str, more_args]))\n )\n sys.exit(nori.core.exitvals['internal']['num'])\n\n if mode == 'read':\n return generic_db_read(db_obj, db_cur, tables, key_cv, value_cv,\n where_str, where_args, more_str, more_args)\n\n if mode == 'update':\n return generic_db_update(db_obj, db_cur, tables, key_cv, value_cv,\n where_str, where_args)\n\n if mode == 'insert':\n return generic_db_insert(db_obj, db_cur, tables, key_cv, value_cv,\n where_str, where_args)\n\n if mode == 'delete':\n return generic_db_delete(db_obj, db_cur, tables, key_cv, value_cv,\n where_str, where_args)",
"def test_isc_optviewserver_statements_series_passing(self):\n assertParserResultDictTrue(\n optviewserver_statements_series,\n 'provide-ixfr yes;' +\n 'request-ixfr yes;' +\n 'transfer-format one-answer;',\n {'provide_ixfr': 'yes',\n 'request_ixfr': 'yes',\n 'transfer_format': 'one-answer'}\n )",
"def query(mdx_stmt):",
"def _check_sql_mode(self, **kwargs):\n return []",
"def _builtin_consult(filename, database=None, engine=None, **kwdargs):\n check_mode((filename,), ['a'], functor='consult', **kwdargs)\n database.consult(filename, location=kwdargs.get('location'))\n return True",
"def query3() :",
"def test_isc_clause_view_zone_passing(self):\n test_data = [\n 'view red { zone www.example.com { auto-dnssec maintain; }; };',\n ]\n result = clause_stmt_view_standalone.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])",
"def drupal_db_query(db_obj, db_cur, mode, scope, key_cv, value_cv):\n\n if mode not in ['read', 'update', 'insert', 'delete']:\n nori.core.email_logger.error(\n'''Internal Error: invalid mode supplied in call to\ndrupal_db_query(); call was (in expanded notation):\n\ndrupal_db_query(\n db_obj={0},\n db_cur={1},\n mode={2},\n scope={3},\n key_cv={4},\n value_cv={5}\n)\n\nExiting.'''.format(*map(nori.pps, [db_obj, db_cur, mode, scope, key_cv,\n value_cv]))\n )\n sys.exit(nori.core.exitvals['internal']['num'])\n\n if mode == 'read':\n #\n # I finally realized that if you try to retrieve multiple fields\n # simultaneously, and there are bogus rows with deleted = 1,\n # you will lose entire result rows. Even a construct like\n # 'AND (f.deleted = 0 OR f.deleted IS NULL)'\n # doesn't help, because the column is only NULL if the join\n # fails entirely. Moreover, the same problem applies if (for\n # example) there is a row with the same entity_id but a\n # different entity_type, so it's not just a question of removing\n # old/bogus rows from the database. There are basically two\n # solutions:\n # 1) pull out just the matching rows into a temp table, so joins\n # to that will either match properly or fail completely\n # 2) retrieve only one field at a time, and forget about the\n # 'IS NULL' - the query will just return no results if there\n # are no matches\n # Clearly, the second option is much better.\n #\n\n #\n # First, we need to run a SELECT on each value_cv entry, and\n # collate the results. Suppose the multiple-valued flag is true\n # in the template, and there are three sets of keys in the\n # database for this query. The first set of keys has two\n # results for the first value_cv entry, the second has none, and\n # the third has one. Now we need to transform this:\n # [(K1a, K2a, V1a),\n # (K1a, K2a, V1b),\n # (K1c, K2c, V1c)]\n # to this:\n # results[(K1a, K2a)][1] = [V1a, V1b]\n # results[(K1c, K2c)][1] = [V1c]\n # For the second value_cv entry, we might have:\n # [(K1a, K2a, V2a),\n # (K1b, K2b, V2b),\n # (K1b, K2b, V2c),\n # (K1c, K2c, V2d)]\n # which becomes:\n # results[(K1a, K2a)][2] = [V2a]\n # results[(K1b, K2b)][2] = [V2b, V2c]\n # results[(K1c, K2c)][2] = [V2d]\n # and so on.\n #\n results = collections.OrderedDict()\n for i, cv in enumerate(value_cv):\n ret = drupal_db_read(db_obj, db_cur, key_cv, [cv])\n if ret is None:\n return None\n for row in ret:\n if row[0:-1] not in results:\n results[row[0:-1]] = {}\n if i not in results[row[0:-1]]:\n results[row[0:-1]][i] = []\n results[row[0:-1]][i].append(row[-1])\n\n #\n # Now we need to re-collate the results into the sort of rows we\n # would get if we retrieved all of the value_cv entries at once.\n # Multiple entries should produce Cartesian products, and\n # missing entries should be replaced with None. 
For the example\n # above, we get:\n # [(K1a, K2a, V1a, V2a),\n # (K1a, K2a, V1b, V2a),\n # (K1b, K2b, None, V2b),\n # (K1b, K2b, None, V2c),\n # (K1c, K2c, V1c, V2d)]\n #\n full_rows = []\n for key_t in results:\n column_lists = [[x] for x in key_t]\n for i, cv in enumerate(value_cv):\n if i not in results[key_t]:\n column_lists.append([None])\n else:\n column_lists.append(results[key_t][i])\n for full_row in itertools.product(*column_lists):\n full_rows.append(full_row)\n\n return full_rows\n\n if mode == 'update':\n return drupal_db_update(db_obj, db_cur, key_cv, value_cv)\n\n if mode == 'insert':\n return drupal_db_insert(db_obj, db_cur, key_cv, value_cv)\n\n if mode == 'delete':\n return drupal_db_delete(db_obj, db_cur, scope, key_cv, value_cv)",
"def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n file.write(\"COUNTEREXAMPLE;\\n\")\n return",
"def useful_test_function(db, query):\n print pd.read_sql_query(query, db)",
"def _run_query(self):",
"def main(ctx):\n\n print(\"Mode:\")",
"def make_query(self):",
"def is_query_supported(request):\n return (\n request.cfg.cvsdb.enabled\n and request.pathtype == vclib.DIR\n and request.roottype in [\"cvs\", \"svn\"]\n )",
"def __call__(self, src__=0, test__=0, **kw):\n context = self.context\n\n dbc, DB__ = self._get_dbc()\n\n p = None\n\n argdata = self._argdata(kw)\n argdata['sql_delimiter'] = '\\0'\n argdata['sql_quote__'] = dbc.sql_quote__\n\n # TODO: Review the argdata dictonary. The line bellow is receiving unicode\n # strings, mixed with standard strings. It is insane! Archetypes needs a policy\n # about unicode, and lots of tests on this way. I prefer to not correct it now,\n # only doing another workarround. We need to correct the cause of this problem,\n # not its side effects :-(\n\n try:\n query = apply(self.template, (p,), argdata)\n except TypeError, msg:\n msg = str(msg)\n if 'client' in msg:\n raise NameError(\"'client' may not be used as an \" +\n \"argument name in this context\")\n else: raise\n\n __traceback_info__ = query\n\n if src__: return query\n\n # Get the encoding arguments\n # We have two possible kw arguments:\n # db_encoding: The encoding used in the external database\n # site_encoding: The uncoding used for the site\n # If not specified, we use sys.getdefaultencoding()\n db_encoding = kw.get('db_encoding',None)\n\n try:\n site_encoding = kw.get('site_encoding', context.portal_properties.site_properties.default_charset)\n except AttributeError, KeyError:\n site_encoding = kw.get('site_encoding',sys.getdefaultencoding())\n\n if type(query) == type(u''):\n if db_encoding:\n query = query.encode(db_encoding)\n else:\n try:\n query = query.encode(site_encoding)\n except UnicodeEncodeError:\n query = query.encode('UTF-8')\n\n\n if context.cache_time_ > 0 and context.max_cache_ > 0:\n result = self._cached_result(DB__, (query, context.max_rows_))\n else:\n try:\n result = DB__.query(query, context.max_rows_)\n except ConflictError:\n raise\n except:\n log_exc(msg='Database query failed', reraise=1)\n\n if hasattr(context, '_v_sql_brain'):\n brain = context._v_sql_brain\n else:\n brain=context._v_sql_brain = getBrain(context.class_file_,\n context.class_name_)\n\n if type(result) is type(''):\n f = StringIO()\n f.write(result)\n f.seek(0)\n result = RDB.File(f, brain, p, None)\n else:\n if db_encoding:\n # Encode result before we wrap it in Result object\n # We will change the encoding from source to either the specified target_encoding\n # or the site default encoding\n\n # The data is a list of tuples of column data\n encoded_result = []\n for row in result[1]:\n columns = ()\n for col in row:\n if isinstance(col, types.StringType):\n # coerce column to unicode with database encoding\n newcol = unicode(col,db_encoding)\n # Encode column as string with site_encoding\n newcol = newcol.encode(site_encoding)\n else:\n newcol = col\n\n columns += newcol,\n\n encoded_result.append(columns)\n\n result = (result[0],encoded_result)\n\n result = Results(result, brain, p, None)\n\n columns = result._searchable_result_columns()\n\n if test__ and columns != self._col:\n self._col=columns\n\n # If run in test mode, return both the query and results so\n # that the template doesn't have to be rendered twice!\n if test__: return query, result\n\n return result",
"def generate_query(self):\n return",
"def add_row_filter(source, args, index):\n queries = []\n for subindex in range(1, 6):\n query = args.get('select-query%02d-%02d' % (index, subindex))\n if query:\n queries.append(query)\n reverse = (args.get('select-reverse%02d' % index) == 'on')\n if reverse:\n return source.without_rows(queries)\n else:\n return source.with_rows(queries)",
"def cmd_mode(args):",
"def test_isc_server_stmt_request_ixfr_failing(self):\n test_string = [\n 'request-ixfr Y;'\n ]\n result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def sql(self, method: str = 'select') -> str:",
"def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"",
"def generate_sql(opts):\n base_select = BASE_SELECT[opts[\"source\"]]\n usage_structs = \",\".join(u.sql for u in USAGE_CRITERIA[opts[\"source\"]])\n usage_structs = indent(dedent(usage_structs), \" \")\n return TEMPLATE.format(**locals(), **opts)",
"def _get_queries(args):\n if args.mode == '2DSEQ':\n queries = [\"@type=='2dseq'\", \"@is_spectroscopy==True\", \"@is_complex==True\"]\n elif args.mode == 'FID':\n queries = [\"@type=='fid'\", \"@is_spectroscopy==True\"]\n return queries + args.query",
"def query(self):",
"def query(self):\n pass",
"def query_prototype(basic, extra, cid):\n click.echo('SINGLE mode for %s' % cid)\n click.echo('Query for CID in mode: %s' % basic)\n click.echo('Query for CID with extra: %s' % extra)",
"def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):\n\n cursor = dbapi_con.cursor()\n cursor.execute(\"SET SESSION sql_mode = %s\", [sql_mode])",
"def query(self, query):"
]
| [
"0.54498583",
"0.5436921",
"0.53755015",
"0.5369825",
"0.53392714",
"0.52997357",
"0.52606136",
"0.5233001",
"0.5134705",
"0.5132359",
"0.50852036",
"0.50761706",
"0.50589323",
"0.5057924",
"0.504792",
"0.4983767",
"0.49656382",
"0.4941426",
"0.49382856",
"0.49334484",
"0.49299788",
"0.4910355",
"0.48603532",
"0.4851056",
"0.48353735",
"0.47997057",
"0.47982588",
"0.47961068",
"0.4785774",
"0.4772826"
]
| 0.58370453 | 0 |
Clause Options/View/Server; Statement provideixfr; failing mode | def test_isc_optviewserver_stmt_provide_ixfr_failing(self):
test_string = [
'provide-ixfr Y'
]
result = optviewserver_stmt_provide_ixfr.runTests(test_string, failureTests=True)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_server_stmt_request_ixfr_failing(self):\n test_string = [\n 'request-ixfr Y;'\n ]\n result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def test_isc_optviewserver_stmt_provide_ixfr_passing(self):\n test_string = [\n 'provide-ixfr yes;',\n 'provide-ixfr 1;',\n 'provide-ixfr 0;',\n 'provide-ixfr no;',\n 'provide-ixfr True;',\n 'provide-ixfr False;',\n ]\n result = optviewserver_stmt_provide_ixfr.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])",
"def test_isc_server_stmt_request_ixfr_passing(self):\n test_string = [\n 'request-ixfr yes;',\n 'request-ixfr 1;',\n 'request-ixfr 0;',\n 'request-ixfr no;',\n 'request-ixfr True;',\n 'request-ixfr False;',\n ]\n result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])",
"def test_isc_clause_view_zone_passing(self):\n test_data = [\n 'view red { zone www.example.com { auto-dnssec maintain; }; };',\n ]\n result = clause_stmt_view_standalone.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])",
"def test_isc_optviewserver_statements_series_passing(self):\n assertParserResultDictTrue(\n optviewserver_statements_series,\n 'provide-ixfr yes;' +\n 'request-ixfr yes;' +\n 'transfer-format one-answer;',\n {'provide_ixfr': 'yes',\n 'request_ixfr': 'yes',\n 'transfer_format': 'one-answer'}\n )",
"def useful_test_function(db, query):\n print pd.read_sql_query(query, db)",
"def test_isc_optviewserver_stmt_statements_series_failing(self):\n test_string = [\n 'statements_series \"YYYY\";',\n ]\n result = optviewserver_statements_series.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def test_set_invalid_query_option(self):\n execute_statement_req = TCLIService.TExecuteStatementReq()\n execute_statement_req.sessionHandle = self.session_handle\n execute_statement_req.confOverlay = {\"foo\":\"bar\"}\n execute_statement_req.statement = \"select 1\"\n execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)\n TestQueryOptionsHS2.check_response(execute_statement_resp,\n TCLIService.TStatusCode.ERROR_STATUS, \"Invalid query option: foo\")",
"def _check_sql_mode(self, **kwargs):\n return []",
"def query(mdx_stmt):",
"def query3() :",
"def _builtin_consult(filename, database=None, engine=None, **kwdargs):\n check_mode((filename,), ['a'], functor='consult', **kwdargs)\n database.consult(filename, location=kwdargs.get('location'))\n return True",
"def is_query_supported(request):\n return (\n request.cfg.cvsdb.enabled\n and request.pathtype == vclib.DIR\n and request.roottype in [\"cvs\", \"svn\"]\n )",
"def _run_query(self):",
"def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n file.write(\"COUNTEREXAMPLE;\\n\")\n return",
"def test_view(self):\n symbol = 'NFLX'\n table = 'option'\n path = os.path.join(CLEAN_DIR, '__%s__.h5' % symbol.lower())\n db = pd.HDFStore(path)\n df_valid = db.select('%s/valid/normal' % table)\n df_clean = db.select('%s/clean/normal' % table)\n db.close()\n\n df_date = df_valid[df_valid['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n df_date = df_clean[df_clean['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n # self.client.get(reverse('admin:calc_day_iv', kwargs={'symbol': 'GG', 'insert': 0}))",
"def test_unsupported_sql(self):\n user = getuser()\n impala_client = self.create_impala_client()\n error_msg = \"UnsupportedFeatureException: {0} is not supported by Sentry.\"\n statements = [(\"grant select on database functional to user foo\",\n error_msg.format(\"GRANT <privilege> TO USER\")),\n (\"grant select on database functional to group foo\",\n error_msg.format(\"GRANT <privilege> TO GROUP\")),\n (\"revoke select on database functional from user foo\",\n error_msg.format(\"REVOKE <privilege> FROM USER\")),\n (\"revoke select on database functional from group foo\",\n error_msg.format(\"REVOKE <privilege> FROM GROUP\")),\n (\"show grant group foo\", error_msg.format(\"SHOW GRANT GROUP\"))]\n for statement in statements:\n result = self.execute_query_expect_failure(impala_client, statement[0], user=user)\n assert statement[1] in str(result)",
"def _valid_filter_query(self):\n if self._output_invalid_imeis:\n valid_filter_sql = sql.SQL('TRUE')\n else:\n valid_filter_sql = sql.SQL('is_valid IS TRUE')\n return valid_filter_sql",
"def debug(self, query, fname, sample=-1):\n debug(self, query, fname, sample)",
"def drupal_db_query(db_obj, db_cur, mode, scope, key_cv, value_cv):\n\n if mode not in ['read', 'update', 'insert', 'delete']:\n nori.core.email_logger.error(\n'''Internal Error: invalid mode supplied in call to\ndrupal_db_query(); call was (in expanded notation):\n\ndrupal_db_query(\n db_obj={0},\n db_cur={1},\n mode={2},\n scope={3},\n key_cv={4},\n value_cv={5}\n)\n\nExiting.'''.format(*map(nori.pps, [db_obj, db_cur, mode, scope, key_cv,\n value_cv]))\n )\n sys.exit(nori.core.exitvals['internal']['num'])\n\n if mode == 'read':\n #\n # I finally realized that if you try to retrieve multiple fields\n # simultaneously, and there are bogus rows with deleted = 1,\n # you will lose entire result rows. Even a construct like\n # 'AND (f.deleted = 0 OR f.deleted IS NULL)'\n # doesn't help, because the column is only NULL if the join\n # fails entirely. Moreover, the same problem applies if (for\n # example) there is a row with the same entity_id but a\n # different entity_type, so it's not just a question of removing\n # old/bogus rows from the database. There are basically two\n # solutions:\n # 1) pull out just the matching rows into a temp table, so joins\n # to that will either match properly or fail completely\n # 2) retrieve only one field at a time, and forget about the\n # 'IS NULL' - the query will just return no results if there\n # are no matches\n # Clearly, the second option is much better.\n #\n\n #\n # First, we need to run a SELECT on each value_cv entry, and\n # collate the results. Suppose the multiple-valued flag is true\n # in the template, and there are three sets of keys in the\n # database for this query. The first set of keys has two\n # results for the first value_cv entry, the second has none, and\n # the third has one. Now we need to transform this:\n # [(K1a, K2a, V1a),\n # (K1a, K2a, V1b),\n # (K1c, K2c, V1c)]\n # to this:\n # results[(K1a, K2a)][1] = [V1a, V1b]\n # results[(K1c, K2c)][1] = [V1c]\n # For the second value_cv entry, we might have:\n # [(K1a, K2a, V2a),\n # (K1b, K2b, V2b),\n # (K1b, K2b, V2c),\n # (K1c, K2c, V2d)]\n # which becomes:\n # results[(K1a, K2a)][2] = [V2a]\n # results[(K1b, K2b)][2] = [V2b, V2c]\n # results[(K1c, K2c)][2] = [V2d]\n # and so on.\n #\n results = collections.OrderedDict()\n for i, cv in enumerate(value_cv):\n ret = drupal_db_read(db_obj, db_cur, key_cv, [cv])\n if ret is None:\n return None\n for row in ret:\n if row[0:-1] not in results:\n results[row[0:-1]] = {}\n if i not in results[row[0:-1]]:\n results[row[0:-1]][i] = []\n results[row[0:-1]][i].append(row[-1])\n\n #\n # Now we need to re-collate the results into the sort of rows we\n # would get if we retrieved all of the value_cv entries at once.\n # Multiple entries should produce Cartesian products, and\n # missing entries should be replaced with None. 
For the example\n # above, we get:\n # [(K1a, K2a, V1a, V2a),\n # (K1a, K2a, V1b, V2a),\n # (K1b, K2b, None, V2b),\n # (K1b, K2b, None, V2c),\n # (K1c, K2c, V1c, V2d)]\n #\n full_rows = []\n for key_t in results:\n column_lists = [[x] for x in key_t]\n for i, cv in enumerate(value_cv):\n if i not in results[key_t]:\n column_lists.append([None])\n else:\n column_lists.append(results[key_t][i])\n for full_row in itertools.product(*column_lists):\n full_rows.append(full_row)\n\n return full_rows\n\n if mode == 'update':\n return drupal_db_update(db_obj, db_cur, key_cv, value_cv)\n\n if mode == 'insert':\n return drupal_db_insert(db_obj, db_cur, key_cv, value_cv)\n\n if mode == 'delete':\n return drupal_db_delete(db_obj, db_cur, scope, key_cv, value_cv)",
"def issilent():\n return GLOBAL['VERBOSE'] == False",
"def dbtrace_ui():\n\n pass",
"def test_disallowed_queries():\n strings = [\"select * from test times 10\",\n \"select * from test save clusters with threshold .5 as test.csv\",\n \"select * from test given a=5\",\n \"select * from test with confidence .4\",\n \"select a conf .4 from test\",\n \"select a conf .4, b from test\",\n \"simulate a conf .4 from test times 10\",\n \"simulate a conf .4, b from test times 10\",\n \"infer * from test times 10\",\n \"infer typicality from test\",\n \"infer * from test with confidence 1.5\",\n \"simulate typicality from test\",\n \"infer * from test save clusters with threshold .5 as test.csv\",\n \"infer * from test given a=5\",\n \"simulate * from test where a < 4\",\n \"simulate * from test save clusters with threshold .5 as test.csv\",\n \"simulate * from test with confidence .4\",\n \"simulate * from test with 4 samples\",\n \"simulate * from test\",\n \"estimate columns from test with confidence .4\",\n \"estimate columns from test given a=4\",\n \"estimate columns from test times 10\",\n \"summarize estimate columns from test\",\n \"plot estimate columns from test\",\n \"estimate columns from test save clusters with threshold .5 as test.csv\",\n \"estimate pairwise correlation from test where a = b\",\n \"estimate pairwise correlation from test times 10\",\n \"estimate pairwise correlation from test given a = 5\",\n \"estimate pairwise correlation from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\",\n \"estimate pairwise row similarity from test times 10\",\n \"estimate pairwise row similarity from test given a = 5\",\n \"estimate pairwise row similarity from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\"\n ]\n\n for query_string in strings:\n ast = bql_statement.parseString(query_string,parseAll=True)\n with pytest.raises(AssertionError):\n parser.parse_single_statement(ast)",
"def __call__(self, src__=0, test__=0, **kw):\n context = self.context\n\n dbc, DB__ = self._get_dbc()\n\n p = None\n\n argdata = self._argdata(kw)\n argdata['sql_delimiter'] = '\\0'\n argdata['sql_quote__'] = dbc.sql_quote__\n\n # TODO: Review the argdata dictonary. The line bellow is receiving unicode\n # strings, mixed with standard strings. It is insane! Archetypes needs a policy\n # about unicode, and lots of tests on this way. I prefer to not correct it now,\n # only doing another workarround. We need to correct the cause of this problem,\n # not its side effects :-(\n\n try:\n query = apply(self.template, (p,), argdata)\n except TypeError, msg:\n msg = str(msg)\n if 'client' in msg:\n raise NameError(\"'client' may not be used as an \" +\n \"argument name in this context\")\n else: raise\n\n __traceback_info__ = query\n\n if src__: return query\n\n # Get the encoding arguments\n # We have two possible kw arguments:\n # db_encoding: The encoding used in the external database\n # site_encoding: The uncoding used for the site\n # If not specified, we use sys.getdefaultencoding()\n db_encoding = kw.get('db_encoding',None)\n\n try:\n site_encoding = kw.get('site_encoding', context.portal_properties.site_properties.default_charset)\n except AttributeError, KeyError:\n site_encoding = kw.get('site_encoding',sys.getdefaultencoding())\n\n if type(query) == type(u''):\n if db_encoding:\n query = query.encode(db_encoding)\n else:\n try:\n query = query.encode(site_encoding)\n except UnicodeEncodeError:\n query = query.encode('UTF-8')\n\n\n if context.cache_time_ > 0 and context.max_cache_ > 0:\n result = self._cached_result(DB__, (query, context.max_rows_))\n else:\n try:\n result = DB__.query(query, context.max_rows_)\n except ConflictError:\n raise\n except:\n log_exc(msg='Database query failed', reraise=1)\n\n if hasattr(context, '_v_sql_brain'):\n brain = context._v_sql_brain\n else:\n brain=context._v_sql_brain = getBrain(context.class_file_,\n context.class_name_)\n\n if type(result) is type(''):\n f = StringIO()\n f.write(result)\n f.seek(0)\n result = RDB.File(f, brain, p, None)\n else:\n if db_encoding:\n # Encode result before we wrap it in Result object\n # We will change the encoding from source to either the specified target_encoding\n # or the site default encoding\n\n # The data is a list of tuples of column data\n encoded_result = []\n for row in result[1]:\n columns = ()\n for col in row:\n if isinstance(col, types.StringType):\n # coerce column to unicode with database encoding\n newcol = unicode(col,db_encoding)\n # Encode column as string with site_encoding\n newcol = newcol.encode(site_encoding)\n else:\n newcol = col\n\n columns += newcol,\n\n encoded_result.append(columns)\n\n result = (result[0],encoded_result)\n\n result = Results(result, brain, p, None)\n\n columns = result._searchable_result_columns()\n\n if test__ and columns != self._col:\n self._col=columns\n\n # If run in test mode, return both the query and results so\n # that the template doesn't have to be rendered twice!\n if test__: return query, result\n\n return result",
"def read_sql(self):\n pass",
"def test_none_fields_rendering(self):\r\n ss = SelectStatement('table')\r\n self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss))\r\n self.assertTrue(str(ss).startswith('SELECT *'), str(ss))",
"def test_select(self):\n my_conn = MySQL(*self.conn_params)\n table_name = \"inf_schema\"\n inf_schema = my_conn.get_table(table_name)\n # SELECT * FROM inf_schema\n # WHERE table_name like 'INNO%' AND avg_row_length > 100\n results = my_conn.engine.execute(select('*')\n .where(inf_schema.c.table_name\n .like('INNO%'))\n .where(inf_schema.c.avg_row_length >\n 100)\n .select_from(inf_schema)).fetchall()\n table_df = pd.DataFrame(results)\n self.assertGreaterEqual(len(table_df), 6)",
"def query(self):\n pass",
"def test_graph_load_query_exec(self):\n provider = QueryProvider(data_environment=\"SecurityGraph\", driver=self.provider)\n df = provider.all_queries.get_alert(\"help\")\n self.assertIsNone(df)\n\n with self.assertRaises(ValueError) as cm:\n df = provider.all_queries.get_alert()\n self.assertIn(\"alert_id\", str(cm.exception))\n\n df = provider.all_queries.get_alert(alert_id=\"foo\")\n self.assertEqual(len(df), 1)\n self.assertIn(\"/foo\", df[\"query\"].iloc[0])",
"def main(*argv):\n try:\n attr_features = argv[0]\n sql_clause = argv[1]\n polygon_grid = argv[2]\n error_field_count = str(argv[3]) #'NULL_COUNT'#\n error_field_def = str(argv[4]) #'NULL_COLUMNS'#\n output_fc = argv[5]\n out_fc_exists = arcpy.Exists(output_fc)\n\n # Local Variable\n #\n scratchFolder = env.scratchFolder\n scratchGDB = env.scratchGDB\n results = []\n # Logic\n #\n if not out_fc_exists:\n output_gdb = validate_workspace(os.path.dirname(output_fc))\n # Create the grid\n #\n out_grid = arcpy.CopyFeatures_management(polygon_grid, output_fc)[0]\n out_grid = extend_table(out_grid)\n where_clause=None\n else:\n arcpy.MakeFeatureLayer_management(output_fc, \"lyr\")\n arcpy.SelectLayerByLocation_management(\"lyr\", \"HAVE_THEIR_CENTER_IN\", polygon_grid)\n oids = [row[0] for row in arcpy.da.SearchCursor(\"lyr\", \"OID@\")]\n if len(oids) >1:\n oids_string = str(tuple(oids))\n else:\n oids_string = str('('+ str(oids[0]) + ')')\n\n where_clause = 'OBJECTID IN ' + oids_string\n\n error_field = (error_field_def, error_field_count)\n\n # Process the Data\n #\n\n poly_desc = arcpy.Describe(output_fc)\n fc_desc = arcpy.Describe(attr_features)\n if poly_desc.extent.within(fc_desc.extent):\n\n temp_fc = 'in_memory/clip'\n arcpy.AddMessage('Clipping features to polygon')\n arcpy.Clip_analysis(attr_features, output_fc, temp_fc)\n arcpy.AddMessage('Created in_memory fc')\n #data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc,\n # fields=[value_field])\n if sql_clause:\n attr_sdf = SpatialDataFrame.from_featureclass(temp_fc,\n fields=error_field,\n where_clause=sql_clause)\n else:\n attr_sdf = SpatialDataFrame.from_featureclass(temp_fc,\n fields=error_field)\n arcpy.AddMessage('features read into spatial dataframe after clipping')\n else:\n #data_sdf = geomotion.SpatialDataFrame.from_featureclass(, fields=[value_field])\n arcpy.AddMessage('features read into spatial dataframe without clipping')\n if sql_clause:\n attr_sdf = SpatialDataFrame.from_featureclass(attr_features,\n fields=error_field,\n where_clause=sql_clause)\n else:\n attr_sdf = SpatialDataFrame.from_featureclass(attr_features,\n fields=error_field)\n\n grid_sdf = SpatialDataFrame.from_featureclass(filename=output_fc,\n where_clause=where_clause)\n\n index = attr_sdf.sindex\n for idx, row in enumerate(grid_sdf.iterrows()):\n errors = []\n attrs = []\n geom = row[1].SHAPE\n oid = row[1].OBJECTID\n print(str(oid))\n ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,\n geom.extent.upperRight.X, geom.extent.upperRight.Y]\n row_oids = list(index.intersect(ext))\n df_current = attr_sdf.loc[row_oids]#.copy()\n sq = df_current.geometry.disjoint(geom) == False\n fcount = len(df_current[sq]) # Total Count\n q2 = df_current[error_field_count] > 0\n #& q2\n df_current = df_current[sq].copy() # Get the # of features with deficiency_cnt > 0\n #print(\"here\")\n if fcount>0: #len(df_current) > 0:\n errors += df_current[error_field_count].tolist()\n arcpy.AddMessage(str(errors))\n def process(x):\n print(x)\n return [va for va in x.replace(' ', '').split('|')[-1].split(',') if len(va) > 1]\n for e in df_current[error_field_def].apply(process).tolist():\n attrs += e\n del e\n row = get_answers(oid=oid,\n err=errors,\n attr=attrs,\n feature_count=fcount)\n results.append(row)\n if len(results) > 250:\n extend_table(table=output_fc, rows=results)\n results = []\n del idx\n del row\n del errors\n del attrs\n del geom\n del oid\n del ext\n del row_oids\n del df_current\n del sq\n del q2\n if len(results) > 0:\n 
extend_table(table=output_fc, rows=results)\n del index\n del results\n del grid_sdf\n del attr_sdf\n except arcpy.ExecuteError:\n line, filename, synerror = trace()\n arcpy.AddError(\"error on line: %s\" % line)\n arcpy.AddError(\"error in file name: %s\" % filename)\n arcpy.AddError(\"with error message: %s\" % synerror)\n arcpy.AddError(\"ArcPy Error Message: %s\" % arcpy.GetMessages(2))\n except FunctionError as f_e:\n messages = f_e.args[0]\n arcpy.AddError(\"error in function: %s\" % messages[\"function\"])\n arcpy.AddError(\"error on line: %s\" % messages[\"line\"])\n arcpy.AddError(\"error in file name: %s\" % messages[\"filename\"])\n arcpy.AddError(\"with error message: %s\" % messages[\"synerror\"])\n arcpy.AddError(\"ArcPy Error Message: %s\" % messages[\"arc\"])\n except:\n line, filename, synerror = trace()\n arcpy.AddError(\"error on line: %s\" % line)\n arcpy.AddError(\"error in file name: %s\" % filename)\n arcpy.AddError(\"with error message: %s\" % synerror)"
]
| [
"0.6487286",
"0.6465603",
"0.62053233",
"0.5869645",
"0.5828845",
"0.5621983",
"0.54275465",
"0.53884834",
"0.53768206",
"0.53627336",
"0.53449595",
"0.5315953",
"0.5300032",
"0.52997005",
"0.52063614",
"0.5185329",
"0.50743437",
"0.5046744",
"0.50120586",
"0.49937165",
"0.4991999",
"0.49891883",
"0.49775156",
"0.49758267",
"0.49466297",
"0.4936883",
"0.49066582",
"0.48926356",
"0.48797923",
"0.48747134"
]
| 0.67688143 | 0 |
Clause server; Statement requestixfr; failing mode | def test_isc_server_stmt_request_ixfr_failing(self):
test_string = [
'request-ixfr Y;'
]
result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=True)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_server_stmt_request_ixfr_passing(self):\n test_string = [\n 'request-ixfr yes;',\n 'request-ixfr 1;',\n 'request-ixfr 0;',\n 'request-ixfr no;',\n 'request-ixfr True;',\n 'request-ixfr False;',\n ]\n result = optviewserver_stmt_request_ixfr.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])",
"def query3() :",
"def _run_query(self):",
"def request(query):",
"def query(mdx_stmt):",
"def test_execute_statement_6(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n statement = statement.replace(\"Name\", \"Name_invalid\")\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertTrue(ERROR_MSG in msg)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)",
"def query(self, sql):\r\n\r\n result_sets = []\r\n self.messages = \"\"\r\n messages = \"\"\r\n\r\n # self.batched_query(sql)\r\n\r\n with self.conn.cursor() as cur:\r\n self.cur = cur\r\n\r\n self.request_cancel = False\r\n try:\r\n cur.execute(sql)\r\n except Exception as ex:\r\n self.cur = None\r\n self.messages = str(ex)\r\n return None\r\n while True:\r\n try:\r\n description = cur.description\r\n except:\r\n self.messages = \"Error reading description\"\r\n if self.metadata() is not None and description is not None:\r\n description = list(map(lambda c: c+(self._better_description(c),),description))\r\n # print(description)\r\n try:\r\n # data = cur.fetchmany(10000)\r\n # while True:\r\n # d = cur.fetchmany(10000)\r\n # if d is None or len(d) == 0:\r\n # break\r\n # if self.request_cancel:\r\n # cur.cancel()\r\n # self.request_cancel = False\r\n # data = data + d\r\n data = cur.fetchall()\r\n\r\n # select @var = 'tto' does not produce any resultset and raised an exception\r\n # we catch it and ignore it.\r\n # TODO: is there a better way to handle that?\r\n except pytds.ProgrammingError as ex:\r\n data = None\r\n if str(ex) == \"Previous statement didn't produce any results\":\r\n pass\r\n else:\r\n break\r\n \r\n except Exception as ex:\r\n data = None\r\n self.messages = str(ex) + \" Error while fetching data\"\r\n break\r\n\r\n\r\n\r\n if data is not None:\r\n result_sets.append(ResultSet(description, data))\r\n\r\n try:\r\n have_more_set = cur.nextset()\r\n except:\r\n self.messages = \"Error reading next set\"\r\n break\r\n if have_more_set is None or have_more_set is False:\r\n break\r\n\r\n try:\r\n for msg in cur.messages:\r\n messages = messages + str(msg[1]) + \"\\n\"\r\n except:\r\n self.messages = \"Error reading messages\"\r\n self.messages = messages + self.messages\r\n\r\n # print(\"End \",str(len(result_sets)))\r\n\r\n self.cur = None\r\n\r\n # if not result_sets:\r\n # return None\r\n return result_sets",
"def test_execute_statement_7(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % domain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertFalse(ERROR_MSG in msg)\n with self.subTest():\n self.assertTrue(type_error)\n with self.subTest():\n self.assertFalse(value_error)",
"def test_execute_statement_8(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % domain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n statement = statement.replace(\"%\", \"%%\")\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)",
"def test_execute_statement_4(self):\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement = test_db_utils.gene_domain_stmt(gene_domain_data)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n gene_domain_table_results = test_db_utils.get_data(\n test_db_utils.gene_domain_table_query)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)",
"def test_execute_statement_5(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup1 = find_domains.execute_statement(self.connection, statement)\n result1 = results_tup1[0]\n type_error1 = results_tup1[1]\n value_error1 = results_tup1[2]\n msg1 = results_tup1[3]\n self.trans.commit()\n domain_table_results1 = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n new_trans = self.connection.begin()\n results_tup2 = find_domains.execute_statement(\n self.connection, statement)\n result2 = results_tup2[0]\n type_error2 = results_tup2[1]\n value_error2 = results_tup2[2]\n msg2 = results_tup2[3]\n new_trans.commit()\n domain_table_results2 = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results1), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results2), 1)\n with self.subTest():\n self.assertEqual(result1, 0)\n with self.subTest():\n self.assertEqual(result2, 0)\n with self.subTest():\n self.assertFalse(type_error1)\n with self.subTest():\n self.assertFalse(type_error2)\n with self.subTest():\n self.assertFalse(value_error1)\n with self.subTest():\n self.assertFalse(value_error2)\n with self.subTest():\n self.assertFalse(ERROR_MSG in msg1)\n with self.subTest():\n self.assertTrue(ERROR_MSG in msg2)",
"def test_execute_statement_9(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % wdomain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertFalse(ERROR_MSG in msg)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertTrue(value_error)",
"def test_disallowed_queries():\n strings = [\"select * from test times 10\",\n \"select * from test save clusters with threshold .5 as test.csv\",\n \"select * from test given a=5\",\n \"select * from test with confidence .4\",\n \"select a conf .4 from test\",\n \"select a conf .4, b from test\",\n \"simulate a conf .4 from test times 10\",\n \"simulate a conf .4, b from test times 10\",\n \"infer * from test times 10\",\n \"infer typicality from test\",\n \"infer * from test with confidence 1.5\",\n \"simulate typicality from test\",\n \"infer * from test save clusters with threshold .5 as test.csv\",\n \"infer * from test given a=5\",\n \"simulate * from test where a < 4\",\n \"simulate * from test save clusters with threshold .5 as test.csv\",\n \"simulate * from test with confidence .4\",\n \"simulate * from test with 4 samples\",\n \"simulate * from test\",\n \"estimate columns from test with confidence .4\",\n \"estimate columns from test given a=4\",\n \"estimate columns from test times 10\",\n \"summarize estimate columns from test\",\n \"plot estimate columns from test\",\n \"estimate columns from test save clusters with threshold .5 as test.csv\",\n \"estimate pairwise correlation from test where a = b\",\n \"estimate pairwise correlation from test times 10\",\n \"estimate pairwise correlation from test given a = 5\",\n \"estimate pairwise correlation from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\",\n \"estimate pairwise row similarity from test times 10\",\n \"estimate pairwise row similarity from test given a = 5\",\n \"estimate pairwise row similarity from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\"\n ]\n\n for query_string in strings:\n ast = bql_statement.parseString(query_string,parseAll=True)\n with pytest.raises(AssertionError):\n parser.parse_single_statement(ast)",
"def query(self, query):",
"def test_execute_statement_10(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % wdomain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n statement = statement.replace(\"%\", \"%%\")\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)",
"def test_set_invalid_query_option(self):\n execute_statement_req = TCLIService.TExecuteStatementReq()\n execute_statement_req.sessionHandle = self.session_handle\n execute_statement_req.confOverlay = {\"foo\":\"bar\"}\n execute_statement_req.statement = \"select 1\"\n execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)\n TestQueryOptionsHS2.check_response(execute_statement_resp,\n TCLIService.TStatusCode.ERROR_STATUS, \"Invalid query option: foo\")",
"def test_execution_error(self, pool):\n command = stellr.SelectCommand(TEST_HTTP)\n self.assertEquals(command.pool, pool)\n response = self._create_execution_mocks(pool, 500)\n\n command.add_param('fq', 'field:filter')\n try:\n data = command.execute()\n except stellr.StellrError as e:\n self.assertFalse(e.timeout)\n self.assertEqual(e.status, 500)\n self.assertEqual(e.url,\n TEST_HTTP + '/solr/select?wt=json&fq=field%3Afilter')\n self.assertEqual(e.body, None)\n self.assertEqual(e.response, RESPONSE_DATA)\n return\n\n self.assertFalse(True, 'Error should have been raised')",
"def send_rpc_error(req, rpcreq, e):",
"def query(self):\n pass",
"def test_isc_optviewserver_stmt_provide_ixfr_failing(self):\n test_string = [\n 'provide-ixfr Y'\n ]\n result = optviewserver_stmt_provide_ixfr.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def query(self):",
"def test_get_with_filter_statement(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&st=Stmt00062\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 1",
"def execute(self, request, nolog=False):\n res = self._analyse(request)\n if res is not None:\n return res\n else:\n # classic ways\n self._check_connection()\n cur = self._connection.cursor()\n dat = time.perf_counter()\n try:\n if not nolog:\n lines = request.split(\"\\n\")\n if len(lines) > 20:\n self.LOG(\"SQL \", \"\\n\".join(\n [repr(x) for x in lines[:20]]))\n else:\n self.LOG(\"SQL \", \"\\n\".join([repr(x) for x in lines]))\n cur.execute(request)\n dat2 = time.perf_counter()\n if dat2 - dat > 10:\n self.LOG(\"SQL end\") # pragma: no cover\n except Exception as e:\n raise ExceptionSQL(\n \"unable to execute a SQL request (1)(file %s)\" %\n self.get_file(),\n e,\n request) from e\n return cur",
"def test_execute_statement_3(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data)\n results_tup1 = find_domains.execute_statement(self.connection, statement1)\n result1 = results_tup1[0]\n type_error1 = results_tup1[1]\n value_error1 = results_tup1[2]\n msg1 = results_tup1[3]\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n results_tup2 = find_domains.execute_statement(self.connection, statement2)\n result2 = results_tup2[0]\n type_error2 = results_tup2[1]\n value_error2 = results_tup2[2]\n msg2 = results_tup2[3]\n self.trans.commit()\n gene_domain_table_results = test_db_utils.get_data(\n test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result1, 0)\n with self.subTest():\n self.assertEqual(result2, 0)\n with self.subTest():\n self.assertFalse(type_error1)\n with self.subTest():\n self.assertFalse(type_error2)\n with self.subTest():\n self.assertFalse(value_error1)\n with self.subTest():\n self.assertFalse(value_error2)",
"def test_execute_statement_1(self):\n gene_table_results1 = test_db_utils.get_data(test_db_utils.gene_table_query)\n statement = get_gene_update_statement(1, TRIXIE_GENEID)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n phage_table_results = test_db_utils.get_data(test_db_utils.phage_table_query)\n gene_table_results2 = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status1 = gene_table_results1[0][\"DomainStatus\"]\n domain_status2 = gene_table_results2[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(phage_table_results), 1)\n with self.subTest():\n self.assertEqual(len(gene_table_results2), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(domain_status1, 0)\n with self.subTest():\n self.assertEqual(domain_status2, 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)\n with self.subTest():\n self.assertIsInstance(msg, str)",
"def test_fetchWithPartialValidArgument(self):\n # We need to clear out the welcome message.\n self.transport.clear()\n # Let's send out the faulty command.\n self.server.dataReceived(b\"0001 FETCH 1 FULLL\\r\\n\")\n expected = b\"0001 BAD Illegal syntax: Invalid Argument\\r\\n\"\n self.assertEqual(self.transport.value(), expected)\n self.transport.clear()\n self.server.connectionLost(error.ConnectionDone(\"Connection closed\"))",
"def test_query_sql_injection(self):\r\n\r\n q = '1%3D1;SELECT%20*%20FROM%20task%20WHERE%201=1'\r\n res = self.app.get('/api/task?' + q)\r\n error = json.loads(res.data)\r\n assert res.status_code == 415, error\r\n assert error['action'] == 'GET', error\r\n assert error['status'] == 'failed', error\r\n assert error['target'] == 'task', error\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api/apappp?' + q)\r\n assert res.status_code == 404, res.data\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api/' + q)\r\n assert res.status_code == 404, res.data\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api' + q)\r\n assert res.status_code == 404, res.data",
"def cmd_not_understood(self, line):\n self.respond('500 Command \"%s\" not understood.' %line)",
"def log_request(req: 'flask_request', res: str) -> None:\n #raise Exception(\"Something awful just happened.\")\n #sleep(15)\n try:\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL = \"\"\"insert into log\n (phrase, letters, ip, browser_string, results)\n values\n (%s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (req.form['phrase'],\n req.form['letters'],\n req.remote_addr,\n req.user_agent.browser,\n res))\n except ConnectionError as err:\n print('Is your database switched on? Error:', str(err))\n except CredentialsError as err:\n print('Is your credentials correct? Error:', str(err))\n except SQLError as err:\n print('Is your query correct? Error:', str(err))\n except Exception as err:\n print('Something went wrong:', str(err))\n return 'Error'",
"def test_execute_transaction_6(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid '%'\n domain_data2 = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data2[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % domain\")\n domain_data2[\"Description\"] = description\n domain_data2[\"HitID\"] = \"unique_id\"\n statement3 = test_db_utils.domain_stmt(domain_data2)\n # Valid\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 2)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)"
]
| [
"0.6406493",
"0.63106465",
"0.61551946",
"0.59871066",
"0.57487696",
"0.56122226",
"0.5583032",
"0.55265594",
"0.5511699",
"0.5490959",
"0.5476108",
"0.54370296",
"0.5434275",
"0.541941",
"0.5394525",
"0.5388886",
"0.53367317",
"0.53346413",
"0.53344035",
"0.53339714",
"0.5318219",
"0.5309044",
"0.5276334",
"0.52698886",
"0.52596664",
"0.52309066",
"0.5229977",
"0.522944",
"0.5212116",
"0.5193921"
]
| 0.69276994 | 0 |
Clause Options/View/Server; Statement transfer-format; passing mode | def test_isc_optviewserver_stmt_transfer_format_passing(self):
test_string = [
'transfer-format one-answer;',
'transfer-format many-answers;',
]
result = optviewserver_stmt_transfer_format.runTests(test_string, failureTests=False)
self.assertTrue(result[0])
assertParserResultDictTrue(
optviewserver_stmt_transfer_format,
'transfer-format one-answer;',
{'transfer_format': 'one-answer'}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_optviewserver_stmt_transfer_format_failing(self):\n test_string = [\n 'transfer-format no-answer;',\n 'transfer-format !one-answer;',\n 'transfer-format many-answer;',\n ]\n result = optviewserver_stmt_transfer_format.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def ftp_MODE(self, line):\n # obsolete (backward compatibility with older ftp clients)\n if line in ('s', 'S'):\n self.respond('200 Transfer mode set to: S')\n else:\n self.respond('504 Unimplemented MODE type.')",
"def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')",
"def SendPacketsFlags(self) -> TransmitFileOptions:",
"def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if 
element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)",
"def test_isc_optviewserver_statements_series_passing(self):\n assertParserResultDictTrue(\n optviewserver_statements_series,\n 'provide-ixfr yes;' +\n 'request-ixfr yes;' +\n 'transfer-format one-answer;',\n {'provide_ixfr': 'yes',\n 'request_ixfr': 'yes',\n 'transfer_format': 'one-answer'}\n )",
"def __switch_command_export(self, file_name, selection_only):\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n self.model.to_mat_file(file_name, selection_only)\n elif ext == 'json':\n print \"exporting to: \", file_name\n buff = self.model.to_json_dict(selection_only)\n buff = json.dumps(buff)\n with open(file_name, 'wb') as f:\n f.write(buff)\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))",
"def set_spc_transaction_mode(self, mode):\n pass",
"def svn_client_export(svn_revnum_t_result_rev, char_from, char_to, svn_opt_revision_t_revision, svn_boolean_t_force, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def set_dataframe_format(self, new_format):\n self.sig_option_changed.emit('dataframe_format', new_format)\n self.model().dataframe_format = new_format",
"def _check_sql_mode(self, **kwargs):\n return []",
"def output_data_type(self):\n return PassThrough",
"def export(self, path=\"/tmp/result.txt\", format=0):\n\n try:\n out = open(path, 'wa')\n except Exception, e:\n print(\"Something wrong : %s.\" % e)\n\n if format == 0:\n for ip in self.subnets:\n out.write(\"route add -net %s/24 gw $VPNGW \\n\" % ip)\n for ip in self.single_ips:\n out.write(\"route add -host %s gw $VPNGW \\n\" % ip)\n elif format == 1:\n for domain in self.domain_list:\n if not self.__is_ip_address(domain):\n out.write('server=/%s/8.8.8.8\\n' % domain)\n\n elif format == 2:\n for ip in self.iplist:\n out.write(\"route add -host %s gw $OLDGW \\n\" % ip)\n\n elif format == 3:\n for domain, ip in self.fixed_domain_ip_dict.iteritems():\n out.write('address=/%s/%s\\n' % (domain, ip))\n\n else:\n print(\"Invalid format option\")\n out.close()\n return\n\n out.close()\n print(\"Exported to %s ...\" % path)",
"def svn_client_export4(svn_revnum_t_result_rev, char_from, char_to, svn_opt_revision_t_peg_revision, svn_opt_revision_t_revision, svn_boolean_t_overwrite, svn_boolean_t_ignore_externals, svn_depth_t_depth, char_native_eol, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def getopt_format(self):\n self._print_enum_opt(\"format\", FORMATTERS)",
"def dump(self):\n # This is pretty, but we could just return the ddl_string\n outputs = [\"Table : %s\\n\" % self.name]\n # We show the columns in sequence order, using DSU\n # DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform\n deco_cols = [ (x['sequence'], x) for x in list(self.columns.values()) ]\n deco_cols.sort()\n cols = [ col for seq, col in deco_cols ]\n for column in cols:\n outputs.append(\" %-30s\" % column['name'])\n if 'length' in column and column['length'] != None:\n if 'precision' in column and column['precision'] != None:\n # This column is a numeric data type\n column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])\n else:\n # This column is a text data type\n column_defn = '%s(%d)' % (column['type'], column['length'])\n else:\n # This column is a simple data type such as date or boolean\n column_defn = column['type']\n outputs.append(\" %-15s \" % column_defn)\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n if 'special' in column:\n # Special case for e.g. 'enum' in MySQL\n outputs.append(' %s' % column['special'])\n outputs.append(\"\\n\")\n # Constraints please\n if len(self.constraints) != 0:\n outputs.append(\" Constraints;\\n\")\n for constraint_name, constraint in list(self.constraints.items()):\n outputs.append(\" %s, \" % constraint_name)\n outputs.append(\"%s \" % (constraint['type']))\n if 'columns' in constraint:\n outputs.append(\": \")\n outputs.append(', '.join(constraint['columns']))\n outputs.append(\"\\n\")\n # Indexes\n if len(self.indexes) > 0:\n outputs.append(\" Indexes:\\n\")\n for index_name, index in list(self.indexes.items()):\n outputs.append(\" %s, \" % index_name)\n outputs.append(\"%s\\n\" % index['type'])\n # Don't check number of columns because there must be at least 1\n outputs.append(\" Columns: \")\n outputs.append(\", \".join(index['columns']))\n outputs.append(\"\\n\")\n # LOG.debug(\"Table Dump output: \" + \"\".join(outputs))\n return \"\".join(outputs)",
"def _dump_table(table: Model, directory: Path, format_: str):\n try:\n table.select().tuples()\n table.fields()\n dataset = tablib.Dataset(*table.select().tuples(), headers=table.fields())\n except:\n print(table._meta.database.get_columns(table.table_name()))\n\n if directory is not None:\n print(f\" Dumping {table.table_name()}...\")\n out_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n out_file.write_text(dataset.export(format_))\n print(\" Done.\")\n print(\"=====================\")\n else:\n print(dataset.export(\"csv\"))",
"def set_format_transfer_case(self):\n if len(self.dst_shape) == 4:\n is_four_d = 1\n else:\n is_four_d = (functools_reduce(lambda x1, x2: x1 * x2,\n self.src_shape[:-2]) == 1)\n\n if is_four_d:\n if self.src_shape[-1] % CUBE_SIZE_2 == 0:\n format_transfer_case = 0\n if self.dst_shape[-4] * self.dst_shape[-1] * \\\n (self.dst_shape[-2] + 1) > self.ub_memory:\n format_transfer_case = 2\n else:\n format_transfer_case = 1\n if self.dst_shape[-4] * self.dst_shape[-1] * \\\n (self.dst_shape[-2] + 1) > self.ub_memory:\n format_transfer_case = 2\n else:\n raise RuntimeError(\"ND2Nz only support 2D now when dtype is int8\")\n\n return format_transfer_case",
"def _data_export_helper(self, fmt='ods', force=False):\n def _clean(payload):\n for field in ['cancel',\n 'nosubmit_checkbox_controller1',\n 'mform_isexpanded_id_notice',\n ]:\n payload.pop(field, None)\n\n payload['exporttype'] = fmt\n return payload\n\n response = self.course.moodle.fetch_from_form(\n self._export_form_url % self.id,\n self._export_url % self.id,\n _clean,\n \"database-export-%s-%s\" % (fmt, self.id),\n force=force,\n )\n return response, response.content",
"def dump(self):\n outputs = [\"View : %s\\n\" % self.name]\n cols = list(self.columns.values())\n cols.sort()\n for column in cols:\n outputs.append(\" %-30s %-12s\" % (column['name'], column['type']))\n outputs.append(\"%7s\" % self.__class__.calc_precision(column['type'],\n column['length'], column['precision'], column['scale']))\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n outputs.append(\"\\n\")\n outputs.append(\"\\n\")\n outputs.append(self.sql+\"\\n\")\n outputs.append(\"\\n\")\n return \"\".join(outputs)",
"def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))",
"def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession",
"def svn_client_export2(svn_revnum_t_result_rev, char_from, char_to, svn_opt_revision_t_revision, svn_boolean_t_force, char_native_eol, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def to_sql(\n self,\n ops: data_algebra.data_ops.ViewRepresentation,\n *,\n sql_format_options: Optional[SQLFormatOptions] = None,\n ) -> str:\n assert isinstance(ops, data_algebra.data_ops_types.OperatorPlatform)\n sql_string = self.sql_model.to_sql(ops, sql_format_options=sql_format_options)\n return sql_string",
"def driver_kwargs(self):\n out = super(TestCisRpcClient, self).driver_kwargs\n out['comm'] = 'ServerComm'\n out['response_kwargs'] = {'format_str': self.fmt_str}\n return out",
"def export(self, buffer: IO[str], ind: str = '', disp_multiblend: bool = True) -> None:\n buffer.write(ind + 'side\\n')\n buffer.write(ind + '{\\n')\n buffer.write(\n f'{ind}\\t\"id\" \"{self.id}\"\\n'\n f'{ind}\\t\"plane\" \"({self.planes[0]}) ({self.planes[1]}) ({self.planes[2]})\"\\n'\n f'{ind}\\t\"material\" \"{self.mat}\"\\n'\n f'{ind}\\t\"uaxis\" \"{self.uaxis}\"\\n'\n f'{ind}\\t\"vaxis\" \"{self.vaxis}\"\\n'\n f'{ind}\\t\"rotation\" \"{self.ham_rot:g}\\\"\\n'\n f'{ind}\\t\"lightmapscale\" \"{self.lightmap}\"\\n'\n f'{ind}\\t\"smoothing_groups\" \"{self.smooth}\"\\n'\n )\n if self.disp_power > 0:\n assert self._disp_verts is not None\n assert self.disp_allowed_vert is not None\n buffer.write(\n f'{ind}\\tdispinfo\\n'\n f'{ind}\\t{{\\n'\n f'{ind}\\t\\t\"power\" \"{self.disp_power}\"\\n'\n f'{ind}\\t\\t\"startposition\" \"[{self.disp_pos}]\"\\n'\n f'{ind}\\t\\t\"flags\" \"{_DISP_COLL_TO_FLAG[self.disp_flags & DispFlag.COLL_ALL]}\"\\n'\n f'{ind}\\t\\t\"elevation\" \"{self.disp_elevation}\"\\n'\n f'{ind}\\t\\t\"subdiv\" \"{\"1\" if DispFlag.SUBDIV in self.disp_flags else \"0\"}\"\\n'\n )\n\n size = self.disp_size\n self._export_disp_rowset('normals', 'normal', buffer, ind, size)\n self._export_disp_rowset('distances', 'distance', buffer, ind, size)\n self._export_disp_rowset('offsets', 'offset', buffer, ind, size)\n self._export_disp_rowset('offset_normals', 'offset_norm', buffer, ind, size)\n self._export_disp_rowset('alphas', 'alpha', buffer, ind, size)\n\n buffer.write(f'{ind}\\t\\ttriangle_tags\\n{ind}\\t\\t{{\\n')\n for y in range(size):\n row = [\n f'{vert.triangle_a.value} {vert.triangle_b.value}'\n for vert in self._disp_verts[size * y:size * (y+1)]\n ]\n buffer.write(f'{ind}\\t\\t\"row{y}\" \"{\" \".join(row)}\"\\n')\n buffer.write(ind + '\\t\\t}\\n')\n\n buffer.write(ind + '\\t\\tallowed_verts\\n')\n buffer.write(ind + '\\t\\t{\\n')\n assert len(self.disp_allowed_vert) == 10, self.disp_allowed_vert\n buffer.write(f'{ind}\\t\\t\"10\" \"{\" \".join(map(str, self.disp_allowed_vert))}\"\\n')\n buffer.write(f'{ind}\\t\\t}}\\n{ind}\\t}}\\n')\n\n if disp_multiblend and any(vert.multi_blend for vert in self._disp_verts):\n self._export_disp_rowset('multiblend', 'multi_blend', buffer, ind, size)\n self._export_disp_rowset('alphablend', 'multi_alpha', buffer, ind, size)\n for i in range(4):\n buffer.write(f'{ind}\\t\\tmultiblend_color_{i}\\n{ind}\\t\\t{{\\n')\n for y in range(size):\n row = [\n str(vert.multi_colors[i]) if vert.multi_colors is not None else '1'\n for vert in self._disp_verts[size * y:size * (y+1)]\n ]\n buffer.write(f'{ind}\\t\\t\"row{y}\" \"{\" \".join(row)}\"\\n')\n buffer.write(ind + '\\t\\t}\\n')\n\n buffer.write(ind + '}\\n')",
"def __call__(self, dbio, *args, **kwargs):\n sql, f = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(\"'copy_expert' will run\\n{}\".format(sql))\n cur = dbio.conn.cursor()\n cur.copy_expert(sql, f)\n cur.close()\n dbio.conn.commit()\n f.close()\n else:\n logger.info(\"'copy_expert' will run\\n{}\".format(sql))\n f.close()",
"def dbtrace_show_output(trace_object, output_file):\n\n pass",
"def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)",
"def __call__(self, src__=0, test__=0, **kw):\n context = self.context\n\n dbc, DB__ = self._get_dbc()\n\n p = None\n\n argdata = self._argdata(kw)\n argdata['sql_delimiter'] = '\\0'\n argdata['sql_quote__'] = dbc.sql_quote__\n\n # TODO: Review the argdata dictonary. The line bellow is receiving unicode\n # strings, mixed with standard strings. It is insane! Archetypes needs a policy\n # about unicode, and lots of tests on this way. I prefer to not correct it now,\n # only doing another workarround. We need to correct the cause of this problem,\n # not its side effects :-(\n\n try:\n query = apply(self.template, (p,), argdata)\n except TypeError, msg:\n msg = str(msg)\n if 'client' in msg:\n raise NameError(\"'client' may not be used as an \" +\n \"argument name in this context\")\n else: raise\n\n __traceback_info__ = query\n\n if src__: return query\n\n # Get the encoding arguments\n # We have two possible kw arguments:\n # db_encoding: The encoding used in the external database\n # site_encoding: The uncoding used for the site\n # If not specified, we use sys.getdefaultencoding()\n db_encoding = kw.get('db_encoding',None)\n\n try:\n site_encoding = kw.get('site_encoding', context.portal_properties.site_properties.default_charset)\n except AttributeError, KeyError:\n site_encoding = kw.get('site_encoding',sys.getdefaultencoding())\n\n if type(query) == type(u''):\n if db_encoding:\n query = query.encode(db_encoding)\n else:\n try:\n query = query.encode(site_encoding)\n except UnicodeEncodeError:\n query = query.encode('UTF-8')\n\n\n if context.cache_time_ > 0 and context.max_cache_ > 0:\n result = self._cached_result(DB__, (query, context.max_rows_))\n else:\n try:\n result = DB__.query(query, context.max_rows_)\n except ConflictError:\n raise\n except:\n log_exc(msg='Database query failed', reraise=1)\n\n if hasattr(context, '_v_sql_brain'):\n brain = context._v_sql_brain\n else:\n brain=context._v_sql_brain = getBrain(context.class_file_,\n context.class_name_)\n\n if type(result) is type(''):\n f = StringIO()\n f.write(result)\n f.seek(0)\n result = RDB.File(f, brain, p, None)\n else:\n if db_encoding:\n # Encode result before we wrap it in Result object\n # We will change the encoding from source to either the specified target_encoding\n # or the site default encoding\n\n # The data is a list of tuples of column data\n encoded_result = []\n for row in result[1]:\n columns = ()\n for col in row:\n if isinstance(col, types.StringType):\n # coerce column to unicode with database encoding\n newcol = unicode(col,db_encoding)\n # Encode column as string with site_encoding\n newcol = newcol.encode(site_encoding)\n else:\n newcol = col\n\n columns += newcol,\n\n encoded_result.append(columns)\n\n result = (result[0],encoded_result)\n\n result = Results(result, brain, p, None)\n\n columns = result._searchable_result_columns()\n\n if test__ and columns != self._col:\n self._col=columns\n\n # If run in test mode, return both the query and results so\n # that the template doesn't have to be rendered twice!\n if test__: return query, result\n\n return result"
]
| [
"0.53250027",
"0.5203599",
"0.50582415",
"0.49072838",
"0.4773112",
"0.47538304",
"0.47135463",
"0.46922514",
"0.45904437",
"0.45375693",
"0.45135164",
"0.4501895",
"0.4498528",
"0.44851896",
"0.44687873",
"0.44519544",
"0.4437863",
"0.44301352",
"0.44135845",
"0.44132853",
"0.44089139",
"0.44031462",
"0.43934005",
"0.43909112",
"0.43852213",
"0.4375836",
"0.4373988",
"0.43725008",
"0.4371848",
"0.43714425"
]
| 0.60011727 | 0 |
Clause Options/View/Server; Statement transfer-format; failing mode | def test_isc_optviewserver_stmt_transfer_format_failing(self):
test_string = [
'transfer-format no-answer;',
'transfer-format !one-answer;',
'transfer-format many-answer;',
]
result = optviewserver_stmt_transfer_format.runTests(test_string, failureTests=True)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_optviewserver_stmt_transfer_format_passing(self):\n test_string = [\n 'transfer-format one-answer;',\n 'transfer-format many-answers;',\n ]\n result = optviewserver_stmt_transfer_format.runTests(test_string, failureTests=False)\n self.assertTrue(result[0])\n assertParserResultDictTrue(\n optviewserver_stmt_transfer_format,\n 'transfer-format one-answer;',\n {'transfer_format': 'one-answer'}\n )",
"def test_isc_optviewserver_statements_series_passing(self):\n assertParserResultDictTrue(\n optviewserver_statements_series,\n 'provide-ixfr yes;' +\n 'request-ixfr yes;' +\n 'transfer-format one-answer;',\n {'provide_ixfr': 'yes',\n 'request_ixfr': 'yes',\n 'transfer_format': 'one-answer'}\n )",
"def handle_expt(self):\n self.cmd_channel.debug(\"DTPHandler.handle_expt()\")\n self.cmd_channel.respond(\"426 Connection error; transfer aborted.\")\n self.close()",
"def _send_database_problem(self):\n template_filename = self._get_config_template('databaseerror')\n text = read_template(\n template_filename,\n title='%s - Datebase error' % SERVER_NAME,\n header='Database error')\n if not text:\n self._send_internal_server_error()\n return\n self._send_head(text, 500)\n if not self._header_only:\n self.wfile.write(text)",
"def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')",
"def test_none_fields_rendering(self):\r\n ss = SelectStatement('table')\r\n self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss))\r\n self.assertTrue(str(ss).startswith('SELECT *'), str(ss))",
"def cmd_stru (self, line):\r\n if line[1] in 'fF':\r\n # f == 'file'\r\n self.respond ('200 STRU F Ok')\r\n else:\r\n self.respond ('504 Unimplemented STRU type')",
"def ftp_MODE(self, line):\n # obsolete (backward compatibility with older ftp clients)\n if line in ('s', 'S'):\n self.respond('200 Transfer mode set to: S')\n else:\n self.respond('504 Unimplemented MODE type.')",
"def test_set_invalid_query_option(self):\n execute_statement_req = TCLIService.TExecuteStatementReq()\n execute_statement_req.sessionHandle = self.session_handle\n execute_statement_req.confOverlay = {\"foo\":\"bar\"}\n execute_statement_req.statement = \"select 1\"\n execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)\n TestQueryOptionsHS2.check_response(execute_statement_resp,\n TCLIService.TStatusCode.ERROR_STATUS, \"Invalid query option: foo\")",
"def format_result(result):\n nbdiff = 0\n errloc = 0\n i = 0\n total_diff = len(result)\n for result_row in result:\n i = i + 1\n logging.info(f\"\"\"search row {i}/{len(result)}\"\"\")\n if nbdiff >= int(maxdiff):\n logging.warning(\n f\"\"\"line {id}:reach max diff {maxdiff} for {table1.schema}.{table1.tableName} total diff:{total_diff}\"\"\")\n errloc = nbdiff\n self.total_nbdiff = nbdiff\n break\n list_fields = ''\n fields1 = table1.concatened_fields\n fields2 = table2.concatened_fields\n qry1_fields = f\"\"\"select {fields1} from\n {table1.schema}.{table1.viewName}\n where\n 1 = 1 \"\"\"\n qry2_fields = f\"\"\"select {fields2} from\n {table2.schema}.{table2.viewName}\n where\n 1 = 1 \"\"\"\n\n qry1 = qry1_fields + build_where(table1,result_row)\n qry2 = qry2_fields + build_where(table2,result_row)\n # list_fields = '|'\n # list_fields = list_fields.join(result_row)\n\n for result_col in result_row:\n test = \"{}\"\n list_fields = list_fields + '|' + test.format(result_col)\n # if type(result_col) is str:\n # list_fields = ''.join(list_fields,'|',result_col)\n # else:\n # list_fields = ''.join(list_fields,'|',result_col)\n\n # .encode\n # ('utf-8').strip()\n # list_fields = list_fields + '|' + result_col.encode('utf-8')\n\n list_fields = list_fields.lstrip('|')\n quotedsql = qry1.replace(\"'\",\"''\")\n\n qry_thread_1 = ExecQry(\n table1.getengine() + '_dtsDiff',table1,qry1)\n qry_thread_2 = ExecQry(\n table2.getengine() + '_dtsDiff',table2,qry2)\n\n \"\"\"\n start the threads on server1 and server2\n \"\"\"\n qry_thread_1.start()\n qry_thread_2.start()\n\n \"\"\"\n wait for the 2 thread to terminate\n \"\"\"\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n row1detail = qry_thread_1.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n try:\n \"\"\"\n wait for the qry being executed\n \"\"\"\n row2detail = qry_thread_2.join()\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing thread:\n {error.code}\"\"\")\n\n if (row1detail is not None) and (len(row1detail) != 0):\n nbrows1 = 1\n fieldst1 = row1detail[0][0]\n fieldst1 = fieldst1.replace(\"\\x00\",\"\")\n fieldst1 = fieldst1.replace(\"\\r\\n\",\"\\n\")\n fieldst1 = fieldst1.replace(\" \\n\",\"\\n\")\n fieldst1 = fieldst1.replace(\"\\t\",\"\")\n if fieldst1 != row1detail[0][0]:\n self.set_comments(id,'x00 or other found')\n else:\n nbrows1 = 0\n\n if (row2detail is not None) and len(row2detail) != 0:\n nbrows2 = 1\n fieldst2 = row2detail[0][0]\n fieldst2 = fieldst2.replace(\"\\x00\",\"\")\n fieldst2 = fieldst2.replace(\"\\r\\n\",\"\\n\")\n fieldst2 = fieldst2.replace(\"\\t\",\"\")\n fieldst2 = fieldst2.replace(\" \\n\",\"\\n\")\n if fieldst2 != row2detail[0][0]:\n self.set_comments(id,'x00 or other found')\n else:\n nbrows2 = 0\n\n desc = \"ok\"\n if nbrows1 == 1 and nbrows2 == 1:\n if fieldst1 != fieldst2:\n desc = f\"\"\"( <> in server1 {table1.getengine()} and server2 {table2.getengine()})\n server1 {fieldst1}\n server2 {fieldst2}\"\"\"\n # logging.info(f\"\"\"delta in {table1.tableName}:\\n {desc}\n # \"\"\")\n elif nbrows1 == 1 and nbrows2 == 0:\n desc = f\"\"\"(+ in server1 {table1.getengine()}) {fieldst1} ; (- in server2 {table2.getengine()}) \"\"\"\n elif nbrows1 == 0 and nbrows2 == 1:\n desc = f\"\"\"(- in server1 {table1.getengine()}) ; (+ in server2 {table2.getengine()}) {fieldst2}\"\"\"\n\n quoteddesc = desc.replace(\"'\",\"''\")\n quotedlist_fields = list_fields.replace(\"'\",\"''\")\n quotedtableName = 
table1.tableName.replace(\"'\",\"''\")\n sql = f\"\"\"insert into {schemaRepo}.rowdiff (idtable,table_name,\n comments,fields,qry) select '{id}','{quotedtableName}','\n {quoteddesc}','{quotedlist_fields}','{quotedsql}'\n where not exists\n (select 1 from {schemaRepo}.rowdiff where (idtable,lower\n (table_name),\n comments,qry) =\n ({id},lower('{quotedtableName}'),'{quoteddesc}','{quotedsql}'))\"\"\"\n\n if desc != 'ok':\n errloc = errloc + 1\n nbdiff = nbdiff + 1\n logging.info(\n f\"\"\"diffrowset nok {table1.tableName} for id = {id}\"\"\")\n logging.error(f\"\"\"{desc}\"\"\")\n conn = self.connect(cxRepo)\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except Exception as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql}:\n {error.code}\"\"\")\n return errloc",
"def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if 
element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)",
"def SendPacketsFlags(self) -> TransmitFileOptions:",
"def svn_client_export(svn_revnum_t_result_rev, char_from, char_to, svn_opt_revision_t_revision, svn_boolean_t_force, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def test_isc_optviewserver_stmt_statements_series_failing(self):\n test_string = [\n 'statements_series \"YYYY\";',\n ]\n result = optviewserver_statements_series.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def test_isc_clause_view_zone_passing(self):\n test_data = [\n 'view red { zone www.example.com { auto-dnssec maintain; }; };',\n ]\n result = clause_stmt_view_standalone.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])",
"def read_sql(self):\n pass",
"def _check_sql_mode(self, **kwargs):\n return []",
"def fix_xfer_syntax(filename):\n\n current_syntax = get_xfer_syntax(filename)\n if (current_syntax == '1.2.840.10008.1.2.1' or \n current_syntax is None or\n current_syntax == '1.2.840.10008.1.2'):\n return (filename, filename)\n else:\n print(current_syntax)\n\n new_filename = tempfile.mktemp(prefix='iffpy')\n\n subprocess.run([\"gdcmconv\",\n \"-w\",\n \"-i\", filename,\n \"-o\", new_filename])\n\n if os.path.exists(new_filename):\n print(f\"Successfully converted file: {new_filename}\")\n return (new_filename, f\"decompressed;{filename}\")\n else:\n print(f\"Looks like this one failed: {new_filename}\")\n return (None, None)",
"def svn_client_export4(svn_revnum_t_result_rev, char_from, char_to, svn_opt_revision_t_peg_revision, svn_opt_revision_t_revision, svn_boolean_t_overwrite, svn_boolean_t_ignore_externals, svn_depth_t_depth, char_native_eol, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def test_fetchWithPartialValidArgument(self):\n # We need to clear out the welcome message.\n self.transport.clear()\n # Let's send out the faulty command.\n self.server.dataReceived(b\"0001 FETCH 1 FULLL\\r\\n\")\n expected = b\"0001 BAD Illegal syntax: Invalid Argument\\r\\n\"\n self.assertEqual(self.transport.value(), expected)\n self.transport.clear()\n self.server.connectionLost(error.ConnectionDone(\"Connection closed\"))",
"def test_execute(self):\n rset = self.connection.execute(self.rql, export_type=\"json\")\n self.assertTrue(len(rset) > 0)",
"def test_field_rendering(self):\r\n ss = SelectStatement('table', ['f1', 'f2'])\r\n self.assertTrue(unicode(ss).startswith('SELECT \"f1\", \"f2\"'), unicode(ss))\r\n self.assertTrue(str(ss).startswith('SELECT \"f1\", \"f2\"'), str(ss))",
"def dump(self, packet) -> bool:\n # create deep copy of packet\n self.forwarding_table = self.compress()\n copy_of_packet = copy.deepcopy(packet)\n # swap dest and srce of packet\n copy_of_packet[SRCE] = packet[DEST]\n copy_of_packet[DEST] = packet[SRCE]\n # change type of message to \"table\"\n copy_of_packet[TYPE] = TABL\n msg_arr = []\n # iterate through forwarding table and append selected information\n # into the \"msg\" field in copy_of_packet\n for route in self.forwarding_table:\n msg_arr.append({NTWK: route[NTWK], NMSK: route[NMSK], PEER: route[PEER]})\n copy_of_packet[MESG] = msg_arr\n # Convert JSON object to string and encode\n sending_msg = json.dumps(copy_of_packet).encode()\n # send table response to original source (who requested the dump data)\n self.sockets[copy_of_packet[DEST]].sendall(sending_msg)\n return True",
"def cmd_not_understood(self, line):\n self.respond('500 Command \"%s\" not understood.' %line)",
"def command(self, args):\n try:\n with Reader(args.filename, args.sql_command) as odb_reader:\n for row in odb_reader:\n print(row)\n except InterfaceError as err:\n print(f\"Query interface error: {err}\")\n except ProgrammingError as err:\n if \"Assertion failed\" in str(err):\n print(f\"Query error: {args.filename} does not appear to be a valid ODB2 file.\")\n else:\n print(f\"Query error: {err}\")",
"def supports_transfer_syntax(transfer_syntax: str) -> bool:\n return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES",
"def report(db, openfile):\n pass",
"def dbtrace_show_output(trace_object, output_file):\n\n pass",
"def test_execute_statement_5(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup1 = find_domains.execute_statement(self.connection, statement)\n result1 = results_tup1[0]\n type_error1 = results_tup1[1]\n value_error1 = results_tup1[2]\n msg1 = results_tup1[3]\n self.trans.commit()\n domain_table_results1 = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n new_trans = self.connection.begin()\n results_tup2 = find_domains.execute_statement(\n self.connection, statement)\n result2 = results_tup2[0]\n type_error2 = results_tup2[1]\n value_error2 = results_tup2[2]\n msg2 = results_tup2[3]\n new_trans.commit()\n domain_table_results2 = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results1), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results2), 1)\n with self.subTest():\n self.assertEqual(result1, 0)\n with self.subTest():\n self.assertEqual(result2, 0)\n with self.subTest():\n self.assertFalse(type_error1)\n with self.subTest():\n self.assertFalse(type_error2)\n with self.subTest():\n self.assertFalse(value_error1)\n with self.subTest():\n self.assertFalse(value_error2)\n with self.subTest():\n self.assertFalse(ERROR_MSG in msg1)\n with self.subTest():\n self.assertTrue(ERROR_MSG in msg2)",
"def dd_cmd(server, client, line):\n header = \"\\x7f\\x45\\x4c\\x46\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00\\x01\\x00\\x00\\x00\\xbc\\x14\\x01\\x00\\x34\\x00\\x00\\x00\\x54\\x52\\x00\\x00\\x02\\x04\\x00\\x05\\x34\\x00\\x20\\x00\\x09\\x00\\x28\\x00\\x1b\\x00\\x1a\\x00\"\n client.send(header)\n client.send(\"+10 records in\\r\\n1+0 records out\\n\")\n server.logger.info(\"Sent fake DD to {}\".format(client.ip))\n client.exit_status = 0"
]
| [
"0.6499616",
"0.5140578",
"0.51246786",
"0.5114225",
"0.48977125",
"0.48599628",
"0.4842079",
"0.4807188",
"0.4795688",
"0.47092488",
"0.46784234",
"0.46414515",
"0.4617407",
"0.46124506",
"0.45845458",
"0.45773217",
"0.4547477",
"0.45312032",
"0.45205274",
"0.45021373",
"0.45005754",
"0.44908592",
"0.44654906",
"0.4454434",
"0.44537377",
"0.44140399",
"0.4404547",
"0.4402365",
"0.43982124",
"0.4396829"
]
| 0.67722803 | 0 |
Clause optviewserver; Statement optviewserver_statements_series; passing | def test_isc_optviewserver_statements_series_passing(self):
assertParserResultDictTrue(
optviewserver_statements_series,
'provide-ixfr yes;' +
'request-ixfr yes;' +
'transfer-format one-answer;',
{'provide_ixfr': 'yes',
'request_ixfr': 'yes',
'transfer_format': 'one-answer'}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_optviewserver_stmt_statements_series_failing(self):\n test_string = [\n 'statements_series \"YYYY\";',\n ]\n result = optviewserver_statements_series.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def test_isc_clause_view_zone_passing(self):\n test_data = [\n 'view red { zone www.example.com { auto-dnssec maintain; }; };',\n ]\n result = clause_stmt_view_standalone.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def test_fortran_frontend_view_test():\n test_name = \"view_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ndouble precision a(10,11,12)\ndouble precision res(1,1,2) \n\nCALL \"\"\" + test_name + \"\"\"_function(a,res)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,res)\n\ndouble precision aa(10,11,12)\ndouble precision res(1,1,2) \n\ncall viewlens(aa(:,:,1),res)\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,res)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11,23) \ndouble precision :: res(1,1,2)\n\nINTEGER :: JK, JL\n\nres(1,1,1)=0.0\nDO JK=1,10\n DO JL=1,11\n res(1,1,1)=res(1,1,1)+aa(JK,JL)\n ENDDO\nENDDO\naa(1,1)=res(1,1,1)\n\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([1, 1, 2], 42, order=\"F\", dtype=np.float64)\n b[0, 0, 0] = 1\n sdfg(aa=a, res=b)\n assert (a[0, 0, 1] == 42)\n assert (a[0, 0, 0] == 4620)\n assert (b[0, 0, 0] == 4620)",
"def execute_view(self, request, add_column_name=False, nolog=True):\n cur = self.execute(request, nolog=nolog)\n if add_column_name:\n col_name_list = [tuple[0] for tuple in cur.description]\n res = [col_name_list] + list(cur)\n else:\n res = list(cur)\n cur.close()\n if not nolog and (len(res) == 0 or len(res) > 1e4):\n self.LOG(\"execute_view \", len(res), \"results\") # pragma: no cover\n return res",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def calculate_current_view(q_view, index_name):\n determine_periodicity = False\n info = {}\n result = None\n\n if isinstance(q_view, str) and\\\n q_view.upper().startswith(\"SELECT\"):\n q_table = calc_table_name(q_view, sql_type.SELECT)\n info[\"table\"] = q_table\n info[\"index_name\"] = index_name\n\n result = sql_query_base(info, q_str=q_view)\n\n elif isinstance(q_view, str) and\\\n not q_view.startswith(\"SELECT\"):\n info[\"table\"] = q_view\n q_str = \" \".join([\"SELECT * FROM\", q_view, \";\"])\n info[\"index_name\"] = index_name\n\n result = sql_query_base(info, q_str=q_str)\n\n elif isinstance(q_view, dict) and\\\n \"query\" in q_view.keys() and\\\n q_view[\"query\"].upper().startswith(\"SELECT\"):\n q_table = calc_table_name(q_view[\"query\"], sql_type.SELECT)\n info[\"table\"] = q_table\n info[\"index_name\"] = index_name\n\n if \"vars\" in q_view.keys():\n info[\"vars\"] = q_view[\"vars\"].copy()\n\n result = sql_query_base(info, q_str=q_view[\"query\"])\n\n\n elif isinstance(q_view, dict) and\\\n \"procedure\" in q_view.keys():\n info[\"table\"] = q_view[\"procedure\"]\n info[\"procedure\"] = q_view[\"procedure\"]\n info[\"index_name\"] = index_name\n info[\"q_type_ind\"] = sql_type.STORED_PROCEDURE_RES\n\n if \"vars\" in q_view.keys():\n info[\"vars\"] = q_view[\"vars\"].copy()\n\n result = sql_query_base(info, q_str=\"CALL\")\n\n if \"location\" in q_view.keys():\n determine_periodicity = True\n else:\n print(info)\n raise ValueError(\"Failed Run: calculate_current_view!!!\")\n\n return result, determine_periodicity",
"def test_fortran_frontend_view_test_2():\n test_name = \"view2_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,c,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,cc,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),cc(:,:,j))\nk=2\n call viewlens(aa(:,:,k),bb(:,:,k),cc(:,:,k))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n c = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, cc=c, n=10)\n assert (c[0, 0, 0] == 43)\n assert (c[1, 1, 1] == 84)",
"def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)",
"def view_command():\n listing.delete(0, END)\n for row in backend.view():\n listing.insert(END, row)",
"def create_view(self, start: int = 0, stop: int = 0):\n stmt = f\"\"\"create or replace view {self._view_name} as {self.qry}\"\"\"\n if start != 0 or stop != 0:\n sql = stmt + f\" limit {stop} offset {start}\"\n else:\n sql = stmt\n self.execquery(sql)",
"def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise",
"def run_viewvc(self):\n scriptname = '/' + options.script_alias\n assert string.find(self.path, scriptname) == 0\n viewvc_url = self.server.url[:-1] + scriptname\n rest = self.path[len(scriptname):]\n i = string.rfind(rest, '?')\n if i >= 0:\n rest, query = rest[:i], rest[i+1:]\n else:\n query = ''\n # sys.stderr.write(\"Debug: '\"+scriptname+\"' '\"+rest+\"' '\"+query+\"'\\n\")\n env = os.environ\n # Since we're going to modify the env in the parent, provide empty\n # values to override previously set values\n for k in env.keys():\n if k[:5] == 'HTTP_':\n del env[k]\n for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',\n 'HTTP_USER_AGENT', 'HTTP_COOKIE'):\n if env.has_key(k): \n env[k] = \"\"\n # XXX Much of the following could be prepared ahead of time!\n env['SERVER_SOFTWARE'] = self.version_string()\n env['SERVER_NAME'] = self.server.server_name\n env['GATEWAY_INTERFACE'] = 'CGI/1.1'\n env['SERVER_PROTOCOL'] = self.protocol_version\n env['SERVER_PORT'] = str(self.server.server_port)\n env['REQUEST_METHOD'] = self.command\n uqrest = urllib.unquote(rest)\n env['PATH_INFO'] = uqrest\n env['SCRIPT_NAME'] = scriptname\n if query:\n env['QUERY_STRING'] = query\n env['HTTP_HOST'] = self.server.address[0]\n host = self.address_string()\n if host != self.client_address[0]:\n env['REMOTE_HOST'] = host\n env['REMOTE_ADDR'] = self.client_address[0]\n # AUTH_TYPE\n # REMOTE_USER\n # REMOTE_IDENT\n if self.headers.typeheader is None:\n env['CONTENT_TYPE'] = self.headers.type\n else:\n env['CONTENT_TYPE'] = self.headers.typeheader\n length = self.headers.getheader('content-length')\n if length:\n env['CONTENT_LENGTH'] = length\n accept = []\n for line in self.headers.getallmatchingheaders('accept'):\n if line[:1] in string.whitespace:\n accept.append(string.strip(line))\n else:\n accept = accept + string.split(line[7:], ',')\n env['HTTP_ACCEPT'] = string.joinfields(accept, ',')\n ua = self.headers.getheader('user-agent')\n if ua:\n env['HTTP_USER_AGENT'] = ua\n modified = self.headers.getheader('if-modified-since')\n if modified:\n env['HTTP_IF_MODIFIED_SINCE'] = modified\n etag = self.headers.getheader('if-none-match')\n if etag:\n env['HTTP_IF_NONE_MATCH'] = etag\n # XXX Other HTTP_* headers\n decoded_query = string.replace(query, '+', ' ')\n\n # Preserve state, because we execute script in current process:\n save_argv = sys.argv\n save_stdin = sys.stdin\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n # For external tools like enscript we also need to redirect\n # the real stdout file descriptor. (On windows, reassigning the\n # sys.stdout variable is sufficient because pipe_cmds makes it\n # the standard output for child processes.)\n if sys.platform != \"win32\": save_realstdout = os.dup(1) \n try:\n try:\n sys.stdout = self.wfile\n if sys.platform != \"win32\":\n os.dup2(self.wfile.fileno(), 1)\n sys.stdin = self.rfile\n viewvc.main(StandaloneServer(self), cfg)\n finally:\n sys.argv = save_argv\n sys.stdin = save_stdin\n sys.stdout.flush()\n if sys.platform != \"win32\":\n os.dup2(save_realstdout, 1)\n os.close(save_realstdout)\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n except SystemExit, status:\n self.log_error(\"ViewVC exit status %s\", str(status))\n else:\n self.log_error(\"ViewVC exited ok\")",
"def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)",
"def SetViewParameters(ref, args, request):\n del ref # unused\n\n if not args.view:\n request.table.view = None\n\n return request",
"def series_view(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.view)(self, **kwargs)",
"def view_my_consultation(self, sid):\n query = \"Select cid, time, date FROM consultation WHERE sid = %s \"\n inputs = (sid, )\n return self.database_manager.execute_query(query, inputs)",
"def run_online_analysis(outfile, config):\n id_getter = utilities.SocIdGetter(config.views_url, config.migrations_url)\n soc_ids = id_getter.get_ids()\n view_requester = utilities.ViewRequestHandler(config.request_url)\n analyzer = utilities.ViewAnalyzer(\"sqlite:///portal.db\")\n\n for fourby in soc_ids:\n view = view_requester.get_view(fourby)\n logging.debug(\"Processing {0}\".format(fourby))\n if view == \"null\":\n logging.debug(\"Null view for socid {0}\".format(fourby))\n continue\n analyzer.add_view(view)\n\n analyzer.make_csv(outfile)",
"def getViews(read):\n ...",
"def test_view(self):\n symbol = 'NFLX'\n table = 'option'\n path = os.path.join(CLEAN_DIR, '__%s__.h5' % symbol.lower())\n db = pd.HDFStore(path)\n df_valid = db.select('%s/valid/normal' % table)\n df_clean = db.select('%s/clean/normal' % table)\n db.close()\n\n df_date = df_valid[df_valid['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n df_date = df_clean[df_clean['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n # self.client.get(reverse('admin:calc_day_iv', kwargs={'symbol': 'GG', 'insert': 0}))",
"def junos_cve_query(version):\n pass",
"def view_restrns_date(self):\n \n for count,restrn in enumerate(self.final_dataframe.keys()):\n \n if count in (2,3,4,5):\n \n \"\"\"\n 2 == FTNT, 3 == FARERULE, 4 == ALTRULE, 5 == GENRULE\n \"\"\"\n \n # View columns - pick only those are applicable to FTNT,FR, AGR and GR\n cols = list(self.final_dataframe['View'].loc[:,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']) + list(self.final_dataframe['View'].loc[:,'RESTRICTION_SRC':'UNAVAIL'])\n \n # Prepare New dataframe for each restriction in displayed in view\n self.view_dict[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,cols]\n \n # Reset index for newly created dataframe\n self.view_dict[restrn].reset_index(drop =True,inplace =True)\n \n # Drop duplicate from newly created dataframe if any\n self.view_dict[restrn].drop_duplicates(inplace = True )\n \n # capture load and expire trans/date for each restriction & keep in restriction date dataframe\n self.view_restrn_date[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n \n # Reset index for newly created dataframe\n self.view_restrn_date[restrn].reset_index(drop =True,inplace =True)\n \n #print(self.view_restrn_date[restrn])\n \n elif count == 6:\n \n \"\"\"\"\n Repeat above step for \"No Key Found\" if any\n \"\"\"\n self.view_dict['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_SRC':'UNAVAIL']\n self.view_dict['No Key Found'].reset_index(drop =True,inplace =True)\n self.view_restrn_date['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n #self.view_restrn_date['No Key Found'].set_index(keys = ['RESTRICTION_KEY'],drop =True,inplace =True)",
"def createDailyTrafficView():\n query = \"\"\"\n CREATE TEMPORARY VIEW daily_traffic_view AS\n SELECT DATE(time) AS day,\n COUNT(DATE(time)) AS views\n FROM log\n GROUP BY day;\n \"\"\"\n connection.cursor().execute(query)",
"def view_select(view):\n \n if view == '2D Plot':\n trace = trace_dict(view)\n\n elif view == '3D Plot':\n trace = trace_dict(view)\n \n return trace",
"def _set_catalog_view(self, session):\n if self._catalog_view == FEDERATED:\n try:\n session.use_federated_catalog_view()\n except AttributeError:\n pass\n else:\n try:\n session.use_isolated_catalog_view()\n except AttributeError:\n pass",
"def generate_psql_views(self, schema, schema_name_v1, schema_name_v2, psql_views_path):\n psql_views = open(psql_views_path, 'w')\n psql_views.write(\"SET client_min_messages TO ERROR;\\n\")\n psql_views.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\\n\" % schema_name_v1)\n psql_views.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\\n\" % schema_name_v1)\n\n for table_name_v1, table_attr in schema['tables'].iteritems():\n table_name_v2 = table_attr['name']\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n\n columns = merge_dicts(columns_pri, columns_ref, columns)\n\n columns_v2 = [ '\"'+col_attr['name']+'\"' for col_name_v1, col_attr in columns.iteritems() ]\n columns_v2 += [ 'NULL' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n columns_v1 = [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns.iteritems()]\n columns_v1 += [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n view_sql = ('CREATE VIEW %s (%s) AS \\n SELECT %s FROM %s WITH CASCADED CHECK OPTION;\\n\\n' % (\n \"%s.%s\" % (schema_name_v1, table_name_v1),\n ', '.join(columns_v1),\n ', '.join(columns_v2),\n \"%s.%s\" % (schema_name_v2, table_name_v2)\n ))\n\n psql_views.write(view_sql + \"\\n\")\n psql_views.close()",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def view_vs_tbl_validation(self,srctbl,outtbl,oldsrctbllen,src,stp,m,n):\n \n \n \n print('*' * 120)\n self.log.info('*' * 120)\n print('\\nStep {i} - View validation against {s} table\\n'.format(i = stp, s=src))\n self.log.info('Step {i} - View validation against {s} table\\n'.format(i = stp, s=src))\n df2 = pd.DataFrame(columns=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n if len(srctbl) >= 0 :\n if self.executed <= len(src)+1:\n RESTR_SRC_REC2 = 'RESTR_SRC_REC2_'\n RESTR_SRC = 'RESTR_SRC_'\n \n for i in range(0,len(outtbl)):\n if stp not in (0,1):\n for col in (outtbl.columns[:]): \n \n rep_RESTR_SRC_REC2 =col.replace(RESTR_SRC_REC2,'')\n rep_RESTR_SRC =col.replace(RESTR_SRC,'')\n \n if col.startswith(RESTR_SRC_REC2) and rep_RESTR_SRC_REC2 in srctbl.columns:\n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC_REC2,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC_REC2],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,rep_RESTR_SRC_REC2]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC_REC2],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s = src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s = src))\n \n elif col.startswith(RESTR_SRC) and rep_RESTR_SRC in srctbl.columns:\n if 'ALTRULE' in src:\n \n if rep_RESTR_SRC in ('GEN_RULE_SRC_TAR','GEN_RULE_RULE_NO'):\n #print(col)\n if outtbl.loc[i,col] == None:\n #print('None')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n elif outtbl.loc[i,col] != None:\n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif rep_RESTR_SRC not in ('GEN_RULE_SRC_TAR','GEN_RULE_RULE_NO'):\n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n #print('Not in col if ')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n #print('Not in col elif')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n else: \n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = 
pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s = src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s = src))\n \n \n elif col in srctbl.columns:\n \n if (outtbl.loc[i,col]) == (srctbl.loc[i,col]):\n \n df = pd.Series([i,col,srctbl.loc[i,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col]) != (srctbl.loc[i,col]):\n \n df = pd.Series([i,col,srctbl.loc[i,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s=src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s=src))\n \n \n elif col not in srctbl.columns:\n \n \"\"\" This code is for FARE_RULE_SEQ_ALT_GN_RULE and FARE_RULE_TAR_ALT_GN_RULE columns - for AGR only \"\"\"\n \"\"\" RESTR_SRC_GEN_RULE_SRC_TAR and other columns are from - FTNT \"\"\"\n #print(col)\n \n if col in ['FARE_RULE_SEQ_ALT_GN_RULE','FARE_RULE_TAR_ALT_GN_RULE','RESTR_SRC_GEN_RULE_SRC_TAR','RESTR_SRC_GEN_RULE_RULE_NO','RESTR_SRC_GEN_APPL','CAT15_CURRENCY']:\n \n if outtbl.loc[i,col] == None:\n df = pd.Series([i, col,None,outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n df = pd.Series([i, col,None,outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif col in [i,'RESTRICTION_SRC']:\n if outtbl.loc[i,'RESTRICTION_SRC'] == src:\n \n df = pd.Series([i,'RESTRICTION_SRC',src,outtbl.loc[i,'RESTRICTION_SRC'],'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n else: \n df = pd.Series([i,'RESTRICTION_SRC',src,outtbl.loc[i,'RESTRICTION_SRC'],'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n elif col in [i,'RESTRICTION_KEY']:\n pass\n \n else:\n \n if outtbl.loc[i,col] == self.tbl_restrn_date[src].loc[i,col]:\n df = pd.Series([i,col,self.tbl_restrn_date[src].loc[i,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n else:\n df = pd.Series([i,col,self.tbl_restrn_date[src].loc[i,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n elif stp in (0,1):\n for col 
in (outtbl.columns[0:46]):\n if col in srctbl.columns:\n \n if (outtbl.loc[i,col]) == (srctbl.loc[0,col]):\n \n df = pd.Series([i,col,srctbl.loc[0,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col]) != (srctbl.loc[0,col]):\n \n df = pd.Series([i,col,srctbl.loc[0,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif col in ['RESTRICTION_LOAD_TRANS','RESTRICTION_EXPIRE_TRANS']:#currently not recording these columns\n \n pass\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s=src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s=src))\n \n print(df2)\n self.log.info('\\n'+str(df2) + '\\n')\n print('\\n\\n')\n #self.log.info('\\n\\n')\n \n self.utility = Utility()\n self.utility.to_excel(self.config_file['OutPutFilename'],df2, stp, oldsrctbllen)\n self.utility.stp_status(df2, stp, src)\n self.executed += 1\n \n else:\n self.utility.stp_status(df2, stp, src)\n \n else:\n print('Please cross check the query or test data as {} table is empty'.format(src))\n self.log.info('Please cross check the query or test data as {} table is empty'.format(src))",
"def _set_catalog_view(self, session):\n if self._catalog_view == COMPARATIVE:\n try:\n session.use_comparative_catalog_view()\n except AttributeError:\n pass\n else:\n try:\n session.use_plenary_catalog_view()\n except AttributeError:\n pass",
"def creates_view(self):\n return self.statements[0].creates_view()"
]
| [
"0.6122452",
"0.5366731",
"0.5269786",
"0.52468735",
"0.519176",
"0.51175207",
"0.50921494",
"0.50271696",
"0.501648",
"0.49650267",
"0.49156126",
"0.48880568",
"0.48826605",
"0.4845489",
"0.4833126",
"0.48132652",
"0.4803848",
"0.48014912",
"0.47879675",
"0.47807518",
"0.47800788",
"0.47778702",
"0.47689545",
"0.47555432",
"0.475127",
"0.47364008",
"0.4729488",
"0.4725955",
"0.4724408",
"0.47181734"
]
| 0.679627 | 0 |
Clause optviewserver; Statement statements_series; failing | def test_isc_optviewserver_stmt_statements_series_failing(self):
test_string = [
'statements_series "YYYY";',
]
result = optviewserver_statements_series.runTests(test_string, failureTests=True)
self.assertTrue(result[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isc_optviewserver_statements_series_passing(self):\n assertParserResultDictTrue(\n optviewserver_statements_series,\n 'provide-ixfr yes;' +\n 'request-ixfr yes;' +\n 'transfer-format one-answer;',\n {'provide_ixfr': 'yes',\n 'request_ixfr': 'yes',\n 'transfer_format': 'one-answer'}\n )",
"def test_isc_clause_view_zone_passing(self):\n test_data = [\n 'view red { zone www.example.com { auto-dnssec maintain; }; };',\n ]\n result = clause_stmt_view_standalone.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])",
"def DEADcreate_v_fix_view():\n sql_view = \"\"\"create or replace view v_fix as\n SELECT \n fix.fix_ident, \n fix.fix_center,\n ST_Y(ST_Transform(fix.fix_center, 4326)) as fix_lat84,\n ST_X(ST_Transform(fix.fix_center, 4326)) as fix_lon84\n \n FROM \n fix\"\"\"\n conf.Cur.execute(sql_view)\n conf.Con.commit()",
"def test_fortran_frontend_view_test():\n test_name = \"view_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ndouble precision a(10,11,12)\ndouble precision res(1,1,2) \n\nCALL \"\"\" + test_name + \"\"\"_function(a,res)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,res)\n\ndouble precision aa(10,11,12)\ndouble precision res(1,1,2) \n\ncall viewlens(aa(:,:,1),res)\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,res)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11,23) \ndouble precision :: res(1,1,2)\n\nINTEGER :: JK, JL\n\nres(1,1,1)=0.0\nDO JK=1,10\n DO JL=1,11\n res(1,1,1)=res(1,1,1)+aa(JK,JL)\n ENDDO\nENDDO\naa(1,1)=res(1,1,1)\n\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([1, 1, 2], 42, order=\"F\", dtype=np.float64)\n b[0, 0, 0] = 1\n sdfg(aa=a, res=b)\n assert (a[0, 0, 1] == 42)\n assert (a[0, 0, 0] == 4620)\n assert (b[0, 0, 0] == 4620)",
"def test_set_invalid_query_option(self):\n execute_statement_req = TCLIService.TExecuteStatementReq()\n execute_statement_req.sessionHandle = self.session_handle\n execute_statement_req.confOverlay = {\"foo\":\"bar\"}\n execute_statement_req.statement = \"select 1\"\n execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)\n TestQueryOptionsHS2.check_response(execute_statement_resp,\n TCLIService.TStatusCode.ERROR_STATUS, \"Invalid query option: foo\")",
"def test_view(self):\n symbol = 'NFLX'\n table = 'option'\n path = os.path.join(CLEAN_DIR, '__%s__.h5' % symbol.lower())\n db = pd.HDFStore(path)\n df_valid = db.select('%s/valid/normal' % table)\n df_clean = db.select('%s/clean/normal' % table)\n db.close()\n\n df_date = df_valid[df_valid['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n df_date = df_clean[df_clean['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n # self.client.get(reverse('admin:calc_day_iv', kwargs={'symbol': 'GG', 'insert': 0}))",
"def view_vs_tbl_validation(self,srctbl,outtbl,oldsrctbllen,src,stp,m,n):\n \n \n \n print('*' * 120)\n self.log.info('*' * 120)\n print('\\nStep {i} - View validation against {s} table\\n'.format(i = stp, s=src))\n self.log.info('Step {i} - View validation against {s} table\\n'.format(i = stp, s=src))\n df2 = pd.DataFrame(columns=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n if len(srctbl) >= 0 :\n if self.executed <= len(src)+1:\n RESTR_SRC_REC2 = 'RESTR_SRC_REC2_'\n RESTR_SRC = 'RESTR_SRC_'\n \n for i in range(0,len(outtbl)):\n if stp not in (0,1):\n for col in (outtbl.columns[:]): \n \n rep_RESTR_SRC_REC2 =col.replace(RESTR_SRC_REC2,'')\n rep_RESTR_SRC =col.replace(RESTR_SRC,'')\n \n if col.startswith(RESTR_SRC_REC2) and rep_RESTR_SRC_REC2 in srctbl.columns:\n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC_REC2,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC_REC2],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,rep_RESTR_SRC_REC2]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC_REC2],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s = src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s = src))\n \n elif col.startswith(RESTR_SRC) and rep_RESTR_SRC in srctbl.columns:\n if 'ALTRULE' in src:\n \n if rep_RESTR_SRC in ('GEN_RULE_SRC_TAR','GEN_RULE_RULE_NO'):\n #print(col)\n if outtbl.loc[i,col] == None:\n #print('None')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n elif outtbl.loc[i,col] != None:\n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif rep_RESTR_SRC not in ('GEN_RULE_SRC_TAR','GEN_RULE_RULE_NO'):\n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n #print('Not in col if ')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n #print('Not in col elif')\n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n else: \n \n if (outtbl.loc[i,col] == srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = 
pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col] != srctbl.loc[i,col.replace(RESTR_SRC,'')]):\n \n df = pd.Series([i,col,srctbl.loc[i,rep_RESTR_SRC],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s = src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s = src))\n \n \n elif col in srctbl.columns:\n \n if (outtbl.loc[i,col]) == (srctbl.loc[i,col]):\n \n df = pd.Series([i,col,srctbl.loc[i,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col]) != (srctbl.loc[i,col]):\n \n df = pd.Series([i,col,srctbl.loc[i,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s=src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s=src))\n \n \n elif col not in srctbl.columns:\n \n \"\"\" This code is for FARE_RULE_SEQ_ALT_GN_RULE and FARE_RULE_TAR_ALT_GN_RULE columns - for AGR only \"\"\"\n \"\"\" RESTR_SRC_GEN_RULE_SRC_TAR and other columns are from - FTNT \"\"\"\n #print(col)\n \n if col in ['FARE_RULE_SEQ_ALT_GN_RULE','FARE_RULE_TAR_ALT_GN_RULE','RESTR_SRC_GEN_RULE_SRC_TAR','RESTR_SRC_GEN_RULE_RULE_NO','RESTR_SRC_GEN_APPL','CAT15_CURRENCY']:\n \n if outtbl.loc[i,col] == None:\n df = pd.Series([i, col,None,outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n else:\n df = pd.Series([i, col,None,outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif col in [i,'RESTRICTION_SRC']:\n if outtbl.loc[i,'RESTRICTION_SRC'] == src:\n \n df = pd.Series([i,'RESTRICTION_SRC',src,outtbl.loc[i,'RESTRICTION_SRC'],'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n else: \n df = pd.Series([i,'RESTRICTION_SRC',src,outtbl.loc[i,'RESTRICTION_SRC'],'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n elif col in [i,'RESTRICTION_KEY']:\n pass\n \n else:\n \n if outtbl.loc[i,col] == self.tbl_restrn_date[src].loc[i,col]:\n df = pd.Series([i,col,self.tbl_restrn_date[src].loc[i,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n else:\n df = pd.Series([i,col,self.tbl_restrn_date[src].loc[i,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n \n elif stp in (0,1):\n for col 
in (outtbl.columns[0:46]):\n if col in srctbl.columns:\n \n if (outtbl.loc[i,col]) == (srctbl.loc[0,col]):\n \n df = pd.Series([i,col,srctbl.loc[0,col],outtbl.loc[i,col] ,'PASS'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif (outtbl.loc[i,col]) != (srctbl.loc[0,col]):\n \n df = pd.Series([i,col,srctbl.loc[0,col],outtbl.loc[i,col] ,'FAIL'],index=['View_Record_Num','Column_Name','Expected({v}-value)'.format(v=src),'Actual(View-Value)','Result'])\n df2 = df2.append(df,ignore_index = True)\n \n elif col in ['RESTRICTION_LOAD_TRANS','RESTRICTION_EXPIRE_TRANS']:#currently not recording these columns\n \n pass\n \n else:\n print('{c} view column not found in {s} information'.format(c = col,s=src))\n self.log.info('{c} view column not found in {s} information'.format(c = col,s=src))\n \n print(df2)\n self.log.info('\\n'+str(df2) + '\\n')\n print('\\n\\n')\n #self.log.info('\\n\\n')\n \n self.utility = Utility()\n self.utility.to_excel(self.config_file['OutPutFilename'],df2, stp, oldsrctbllen)\n self.utility.stp_status(df2, stp, src)\n self.executed += 1\n \n else:\n self.utility.stp_status(df2, stp, src)\n \n else:\n print('Please cross check the query or test data as {} table is empty'.format(src))\n self.log.info('Please cross check the query or test data as {} table is empty'.format(src))",
"def test_complex_mv_select_statements(self):\n cluster = self.cluster\n cluster.set_configuration_options({'enable_materialized_views': 'true'})\n cluster.populate(3).start()\n node1 = cluster.nodelist()[0]\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n\n logger.debug(\"Creating keyspace\")\n session.execute(\"CREATE KEYSPACE mvtest WITH replication = \"\n \"{'class': 'SimpleStrategy', 'replication_factor': '3'}\")\n session.execute('USE mvtest')\n\n mv_primary_keys = [\"((a, b), c)\",\n \"((b, a), c)\",\n \"(a, b, c)\",\n \"(c, b, a)\",\n \"((c, a), b)\"]\n\n for mv_primary_key in mv_primary_keys:\n\n session.execute(\"CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))\")\n\n insert_stmt = session.prepare(\"INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)\")\n update_stmt = session.prepare(\"UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt1 = session.prepare(\"DELETE FROM test WHERE a = ? AND b = ? AND c = ?\")\n delete_stmt2 = session.prepare(\"DELETE FROM test WHERE a = ?\")\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n rows = [(0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (1, 0, 0, 0),\n (1, 0, 1, 0),\n (1, 1, -1, 0),\n (1, 1, 0, 0),\n (1, 1, 1, 0)]\n\n for row in rows:\n session.execute(insert_stmt, row)\n\n logger.debug(\"Testing MV primary key: {}\".format(mv_primary_key))\n\n session.execute(\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE \"\n \"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}\".format(mv_primary_key))\n time.sleep(3)\n\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new rows that does not match the filter\n session.execute(insert_stmt, (0, 0, 1, 0))\n session.execute(insert_stmt, (1, 1, 0, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # insert new row that does match the filter\n session.execute(insert_stmt, (1, 2, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update rows that does not match the filter\n session.execute(update_stmt, (1, 1, -1, 0))\n session.execute(update_stmt, (0, 1, 1, 0))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # update a row that does match the filter\n session.execute(update_stmt, (2, 1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete rows that does not match the filter\n session.execute(delete_stmt1, (1, 1, -1))\n session.execute(delete_stmt1, (2, 0, 1))\n session.execute(delete_stmt2, (0,))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a row that does match the filter\n session.execute(delete_stmt1, (1, 1, 1))\n assert_all(\n session, \"SELECT a, b, c, d FROM mv\",\n [[1, 0, 1, 0], [1, 2, 1, 0]],\n ignore_order=True,\n cl=ConsistencyLevel.QUORUM\n )\n\n # delete a partition that matches the filter\n session.execute(delete_stmt2, (1,))\n assert_all(session, \"SELECT a, b, c, d FROM mv\", [], 
cl=ConsistencyLevel.QUORUM)\n\n # Cleanup\n session.execute(\"DROP MATERIALIZED VIEW mv\")\n session.execute(\"DROP TABLE test\")",
"def _vRSTN(self,vSWVT=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n vRSTN=None \r\n\r\n # BZ \r\n vRSTN=pd.merge(self.dataFrames['RSTN'],self.dataFrames['RSTN_BZ'],left_on='pk',right_on='fk',suffixes=('','_BZ'))\r\n colList=vRSTN.columns.tolist()\r\n\r\n # CONT\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['CONT'],left_on='fkCONT',right_on='pk',suffixes=('','_C'))\r\n vRSTN.rename(columns={'NAME':'CONT'},inplace=True)\r\n vRSTN=vRSTN.filter(items=colList+['CONT','ID','rkPARENT'])\r\n colList=vRSTN.columns.tolist()\r\n\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['CONT'],left_on='rkPARENT',right_on='pk',suffixes=('','_CP'))\r\n vRSTN.rename(columns={'NAME':'CONT_PARENT'},inplace=True)\r\n vRSTN=vRSTN.filter(items=colList+['CONT_PARENT'])\r\n\r\n # ITYP\r\n sItyp='101=VENT_PHI | 102=VENT_AUF | 103=VENT_ZU | 104=VENT_HALT | 105=VENT_STOERFALL | 106=VENT_AUFZU | 107=VENT_DPHI | 108=VENT_FREIGABE | 201=PREG_PH | 301=DPRG_DPH | 401=MREG_PHI | 402=MREG_QM | 403=MREG_SOLL | 404=MREG_HALT | 411=MREG_BART_PHI | 412=MREG_BART_QM | 501=FWES_TEMP | 502=FWES_AUF | 503=FWES_ZU | 504=FWES_EIN | 505=FWES_AUS | 601=RART_SOLL | 701=PGRP_AKTIV | 702=PGRP_DEAKT | 703=PGRP_RART | 704=PGRP_PUAKT | 705=PGRP_PUDEA | 803=REGV_RART | 901=TABL_SOLL | 1002=ROHR_AUF | 1003=ROHR_ZU | 1006=ROHR_TRENN | 1008=ROHR_LECKEIN | 1009=ROHR_LECKAUS | 1010=ROHR_LECKEINAUS | 1011=ROHR_LECKORT | 1012=ROHR_LECKMENGE | 1101=PUMP_N | 1102=PUMP_EIN | 1103=PUMP_AUS | 1104=PUMP_HALT | 1105=PUMP_STOERFALL | 1106=PUMP_EINAUS | 1107=PUMP_DN | 1108=PUMP_ABSCHALT | 1109=PUMP_AUSFALL | 1201=OBEH_HSOLL | 1301=KNOT_PSOLL | 1401=KOMP_EIN | 1402=KOMP_AUS | 1403=KOMP_EINAUS | 1404=KOMP_QN | 1405=KOMP_N | 1406=KOMP_DP | 1407=KOMP_PK | 1501=GVWK_EIN | 1502=GVWK_AUS | 1503=GVWK_EINAUS | 1504=GVWK_TK | 1505=GVWK_W | 1510=RCPL_ROWT_AKT | 1511=RCPL_ROWT_SW' \r\n items=sItyp.split(sep='|')\r\n sItypDct=dict(zip([int(pair[0]) for pair in [item.split(sep='=') for item in items]]\r\n ,[pair[1].strip() for pair in [item.split(sep='=') for item in items]]\r\n ))\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,str(sItypDct))) \r\n\r\n vRSTN['ITYP_TXT']=vRSTN.apply(lambda row: sItypDct[int(row.ITYP)] if int(row.ITYP) in sItypDct else -1 , axis=1)\r\n vRSTN['ITYP_OBJTYPE']=vRSTN.apply(lambda row: row.TYP if str(row.ITYP_TXT).split(sep='_')[0] == 'TABL' else str(row.ITYP_TXT).split(sep='_')[0] , axis=1)\r\n vRSTN['ITYP_OBJATTR']=vRSTN.apply(lambda row: str(row.ITYP_TXT).split(sep='_')[1] , axis=1)\r\n \r\n #logger.debug(\"{0:s}{1:s}\".format(logStr,str(vRSTN.columns.tolist()))) \r\n\r\n vRSTN = vRSTN[[\r\n 'CONT'\r\n ,'CONT_PARENT'\r\n ,'KA'\r\n ,'BESCHREIBUNG' \r\n #,'ITYP' \r\n #,'ITYP_TXT' \r\n #,'TYP' \r\n ,'ITYP_OBJTYPE'\r\n ,'ITYP_OBJATTR' \r\n \r\n , 'fkDPRG'\r\n , 'fkFWES'\r\n , 'fkGVWK'\r\n , 'fkKNOT'\r\n , 'fkKOMP'\r\n , 'fkMREG'\r\n , 'fkOBEH'\r\n , 'fkPGRP'\r\n , 'fkPREG'\r\n , 'fkPUMP'\r\n , 'fkPUMPPG'\r\n , 'fkRART'\r\n , 'fkRARTPG'\r\n , 'fkRCPL'\r\n , 'fkRCPL_ROWT'\r\n , 'fkREGV'\r\n , 'fkROHR'\r\n , 'fkVENT'\r\n , 'pk' \r\n , 'fkLFKT', 'fkPHI1', 'fkPUMD', 'fkPVAR', 'fkQVAR', 'fkSWVT', 'fkTEVT', 'fkWEVT' \r\n ]]\r\n\r\n # VBEL --- \r\n lookUpVbel=self.dataFrames['vVBEL'][['NAME_i','NAME_k','CONT_i']]\r\n lookUpCols=lookUpVbel.columns.tolist()\r\n lookUpVbel.reset_index(inplace=True)\r\n\r\n lookUpKeys=['fkROHR', 'fkPGRP', 'fkPUMP', 'fkVENT' , 'fkFWES', 'fkREGV'] \r\n lookUpPosts=['_'+lookUpKey.lstrip('fk') for lookUpKey in 
lookUpKeys]\r\n lookUpObjtypes=[lookUpPost.lstrip('_') for lookUpPost in lookUpPosts]\r\n\r\n # pruefen, ob mehrere VBEL-Referenzschluessel hinterlegt sind\r\n vRSTN['ik_Chk']=vRSTN[lookUpKeys][vRSTN[lookUpKeys].astype('int64')>0].count(axis=1) \r\n # Information auf unDef zurücksetzen, wenn es sich gar nicht um ein VBEL-Stellobjekt handelt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(lookUpObjtypes)) & ~(vRSTN['ik_Chk'].isnull()),'ik_Chk']=None \r\n \r\n for lookUpKey,lookUpPost,lookUpObjtype in zip(lookUpKeys,lookUpPosts,lookUpObjtypes): \r\n # es kommen pro VBEL neue Spalten hinzu ...\r\n vRSTN=pd.merge(vRSTN,lookUpVbel,left_on=[lookUpKey,'ITYP_OBJTYPE'],right_on=['OBJID','OBJTYPE'],suffixes=('',lookUpPost),how='left') # nur 1 Treffer (1 Zeile) moeglich ...\r\n # ... allerdings pro fkXXXX, wenn mehrere voneinander verschiedene fkXXXX im RSTN belegt sind (ik_Chk ist dann >1) \r\n # 1 RSTN erzeugt dann mehrere Zeilen: das ist falsch und wird weiter unten korrigiert ... (siehe Filtern ...)\r\n \r\n # die erzeugten Spalten auf eine ziehen und dann loeschen ... \r\n for lookUpCol in lookUpCols: \r\n lookUpColsGen=[lookUpCol+lookUpPost for lookUpKey,lookUpPost in zip(lookUpKeys[1:],lookUpPosts[1:])] \r\n vRSTN[lookUpCol] = vRSTN[[lookUpCol]+lookUpColsGen].bfill(axis=1).iloc[:, 0] # zugewiesen wird die (erste) Nicht-Nul Spalte \r\n vRSTN=vRSTN.drop(lookUpColsGen, axis=1)\r\n \r\n # belegte Spalte auf unDef zurücksetzen, wenn es sich gar nicht um ein VBEL-Stellobjekt handelt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(lookUpObjtypes)) & ~(vRSTN[lookUpCol].isnull()),lookUpCol]=None # Referenz vorhanden und gültig - aber irrelevant \r\n \r\n \r\n # OBJTYPE in Ergebnis\r\n lookUpColsGen=['OBJTYPE'+lookUpPost for lookUpPost in lookUpPosts[1:]] \r\n vRSTN['OBJTYPE'] = vRSTN[['OBJTYPE']+lookUpColsGen].bfill(axis=1).iloc[:, 0] # zugewiesen wird die (erste) Nicht-Nul Spalte \r\n vRSTN=vRSTN.drop(lookUpColsGen, axis=1)\r\n # Filtern ...\r\n vRSTN=vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(lookUpObjtypes)) | ( (vRSTN['ITYP_OBJTYPE'].isin(lookUpObjtypes)) & (vRSTN['ITYP_OBJTYPE']==vRSTN['OBJTYPE'])) ,:]\r\n \r\n # TABL ---\r\n #lookUpTables=['LFKT','PHI1','PUMD','PVAR','QVAR','SWVT','TEVT','WEVT'] \r\n lookUpTables=[]\r\n for table in ['LFKT','PHI1','PUMD','PVAR','QVAR','SWVT','TEVT','WEVT']: \r\n if table in self.dataFrames:\r\n lookUpTables.append(table)\r\n lookUpPosts=['_'+lookUpTable for lookUpTable in lookUpTables]\r\n lookUpTableKeys=['fk'+lookUpTable for lookUpTable in lookUpTables]\r\n lookUpColsGen=['NAME']+['NAME'+lookUpPost for lookUpTable,lookUpPost in zip(lookUpTables[1:],lookUpPosts[1:])]\r\n\r\n # pruefen, ob mehrere TABL-Schluessel hinterlegt sind\r\n vRSTN['TABL_Chk']=vRSTN[lookUpTableKeys][vRSTN[lookUpTableKeys].astype('int64')>0].count(axis=1) \r\n # Information auf unDef zurücksetzen, wenn OBJTYPE nicht passt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(lookUpTables)) & ~(vRSTN['TABL_Chk'].isnull()),'TABL_Chk']=None \r\n for lookUpTable,lookUpTableKey,lookUpPost,lookUpColGen in zip(lookUpTables,lookUpTableKeys,lookUpPosts,lookUpColsGen):\r\n \r\n df=self.dataFrames[lookUpTable][['pk','NAME']] \r\n vRSTN=pd.merge(vRSTN,df,left_on=lookUpTableKey,right_on='pk',suffixes=('',lookUpPost),how='left')\r\n\r\n # belegte Spalte auf unDef zurücksetzen, wenn OBJTYPE nicht passt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin([lookUpTable])) & ~(vRSTN[lookUpColGen].isnull()),lookUpColGen]=None # Referenz vorhanden und gültig - aber irrelevant \r\n\r\n \r\n # neue Spalte TABL bestücken \r\n vRSTN['TABL'] = 
vRSTN[lookUpColsGen].bfill(axis=1).iloc[:, 0] # zugewiesen wird die erste Nicht-Nul Spalte \r\n\r\n # dann generierte Spalten loeschen\r\n vRSTN=vRSTN.drop(lookUpColsGen, axis=1)\r\n\r\n # KNOT ---\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['vKNOT'][['pk','NAME']],left_on='fkKNOT',right_on='pk',suffixes=('','_Kn'),how='left') # nur 1 Treffer moeglich ...\r\n vRSTN.rename(columns={'NAME':'KNOT'},inplace=True)\r\n # belegte Spalte auf unDef zurücksetzen, wenn OBJTYPE nicht passt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(['KNOT'])) & ~(vRSTN['KNOT'].isnull()),'KNOT']=None \r\n\r\n # RART ---\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['vRART'][['pk','NAME','INDSTD_TXT']],left_on='fkRART',right_on='pk',suffixes=('','_Ra'),how='left') # nur 1 Treffer moeglich ...\r\n vRSTN.rename(columns={'NAME':'RART'},inplace=True)\r\n vRSTN.rename(columns={'INDSTD_TXT':'RART_TYP'},inplace=True)\r\n # belegte Spalte auf unDef zurücksetzen, wenn OBJTYPE nicht passt \r\n vRSTN.loc[~(vRSTN['ITYP_OBJTYPE'].isin(['RART'])) & ~(vRSTN['RART'].isnull()),['RART','RART_TYP']]=None \r\n\r\n \r\n\r\n #logger.debug(\"{0:s}{1:s}\".format(logStr,str(vRSTN.columns.tolist()))) -----------------------\r\n\r\n # RART PGRP --\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['vRART'][['pk','NAME','INDSTD_TXT']],left_on='fkRARTPG',right_on='pk',suffixes=('','_RaPGRP'),how='left') # nur 1 Treffer moeglich ...\r\n vRSTN.rename(columns={'NAME':'RARTPG'},inplace=True)\r\n vRSTN.rename(columns={'INDSTD_TXT':'RARTPG_TYP'},inplace=True)\r\n # belegte Spalte auf unDef zurücksetzen, wenn Stellglied nicht passt \r\n vRSTN.loc[\r\n ~(\r\n vRSTN['ITYP_OBJTYPE'].isin(['PGRP']) \r\n &\r\n vRSTN['ITYP_OBJATTR'].isin(['RART']) \r\n )\r\n & \r\n ~(\r\n vRSTN['RARTPG'].isnull()\r\n )\r\n ,\r\n ['RARTPG','RARTPG_TYP']]=None \r\n\r\n # RART REGV --\r\n vRSTN=pd.merge(vRSTN,self.dataFrames['vRART'][['pk','NAME','INDSTD_TXT']],left_on='fkRART',right_on='pk',suffixes=('','_RaREGV'),how='left') # nur 1 Treffer moeglich ...\r\n vRSTN.rename(columns={'NAME':'RARTRV'},inplace=True)\r\n vRSTN.rename(columns={'INDSTD_TXT':'RARTRV_TYP'},inplace=True)\r\n # belegte Spalte auf unDef zurücksetzen, wenn Stellglied nicht passt \r\n vRSTN.loc[\r\n ~(\r\n vRSTN['ITYP_OBJTYPE'].isin(['REGV']) \r\n &\r\n vRSTN['ITYP_OBJATTR'].isin(['RART']) \r\n )\r\n & \r\n ~(\r\n vRSTN['RARTRV'].isnull()\r\n )\r\n ,\r\n ['RARTRV','RARTRV_TYP']]=None \r\n\r\n\r\n # RCPL ---\r\n if 'RCPL' in self.dataFrames:\r\n RCPL=self.dataFrames['RCPL']\r\n RCPL_ROWT=self.dataFrames['RCPL_ROWT']\r\n df=pd.merge(RCPL,RCPL_ROWT,left_on='pk',right_on='fk',suffixes=('','_ROWT'))\r\n vKNOT=self.dataFrames['vKNOT']\r\n df=pd.merge(df,vKNOT,left_on='fkKREF1',right_on='pk',suffixes=('','_KNOT1'))\r\n df=pd.merge(df,vKNOT,left_on='fkKREF2',right_on='pk',suffixes=('','_KNOT2'))\r\n df=df[[\r\n 'NAME'\r\n ,'TYP'\r\n ,'AKTIV_ROWT'\r\n ,'W'\r\n ,'NAME_KNOT1'\r\n ,'NAME_KNOT2'\r\n ,'pk'\r\n ,'pk_ROWT'\r\n ]]\r\n \r\n #510=RCPL_ROWT_AKT | 1511=RCPL_ROWT_SW\r\n\r\n vRSTN=pd.merge(vRSTN,df,left_on=['fkRCPL','fkRCPL_ROWT'],right_on=['pk','pk_ROWT'],suffixes=('','_RCPL_ROWT'),how='left') # nur 1 Treffer moeglich ...\r\n vRSTN.rename(columns={'NAME':'RCPL'},inplace=True)\r\n vRSTN.rename(columns={'NAME_KNOT1':'RCPL_KNOT1'},inplace=True)\r\n vRSTN.rename(columns={'NAME_KNOT2':'RCPL_KNOT2'},inplace=True)\r\n \r\n # belegte Spalte auf unDef zurücksetzen, wenn Stellglied nicht passt \r\n \r\n # >>> 'RCPL_ROWT_SW'.split(sep='_')\r\n # ['RCPL', 'ROWT', 'SW']\r\n \r\n vRSTN.loc[\r\n ~(\r\n vRSTN['ITYP_OBJTYPE'].isin(['RCPL']) \r\n 
&\r\n vRSTN['ITYP_OBJATTR'].isin(['ROWT']) \r\n )\r\n & \r\n ~(\r\n vRSTN['RCPL'].isnull()\r\n )\r\n ,\r\n ['RCPL','RCPL_KNOT1','RCPL_KNOT2']]=None \r\n\r\n\r\n # PUMP PGRP --------------------------------------------------------------------------------------------------\r\n vRSTN=pd.merge(vRSTN,lookUpVbel,left_on='fkPUMPPG',right_on='OBJID',suffixes=('','_PUMP'),how='left') \r\n # belegte Spalte auf unDef zurücksetzen, wenn Stellglied nicht passt \r\n vRSTN.loc[\r\n ~(\r\n vRSTN['ITYP_OBJTYPE'].isin(['PGRP']) \r\n &\r\n vRSTN['ITYP_OBJATTR'].isin(['PUAKT','PUDEA']) \r\n\r\n #704=PGRP_PUAKT | 705=PGRP_PUDEA \r\n\r\n )\r\n & \r\n ~(\r\n vRSTN['NAME_i_PUMP'].isnull()\r\n )\r\n ,\r\n ['NAME_i_PUMP','NAME_k_PUMP']]=None \r\n\r\n\r\n\r\n # ggf. nicht generierbare Spalten generieren ------------------------------------\r\n missingCols=['RCPL' # befuellt wenn RCPL Stellglied\r\n ,'RCPL_KNOT1'\r\n ,'RCPL_KNOT2'\r\n #704=PGRP_PUAKT | 705=PGRP_PUDEA \r\n , 'NAME_i_PUMP'\r\n , 'NAME_k_PUMP' ]\r\n for col in missingCols:\r\n if col not in vRSTN:\r\n vRSTN[col]=None\r\n\r\n\r\n # pruefen, ob für jeden RSTN genau 1 Stellobjekt ermittelt wurde ------------------------------------------------\r\n # Ergebnisse\r\n cols=[ \r\n 'CONT_i' # stellvertretend für die Ergebnisspalten von VBEL Stellobjekten \r\n ,'TABL'\r\n ,'KNOT'\r\n ,'RART'\r\n # ,'RARTPG'\r\n # ,'RARTRV'\r\n ,'RCPL'\r\n #,'NAME_i_PUMP'\r\n ]\r\n vRSTN['Chk']=vRSTN[cols].count(axis=1) \r\n # 0: kein Stellobjekt\r\n # 1: Ok: genau 1 Stellobjekt\r\n # >1: Ergebnisspalten dieses Views sind nicht konsistent befüllt\r\n\r\n vRSTN = vRSTN[[\r\n\r\n 'CONT'\r\n ,'CONT_PARENT'\r\n ,'KA'\r\n ,'BESCHREIBUNG'\r\n \r\n ,'pk'\r\n\r\n # ,'TYP'\r\n ,'ITYP_OBJTYPE'\r\n ,'ITYP_OBJATTR'\r\n\r\n ,'fkROHR', 'fkPGRP', 'fkPUMP', 'fkVENT' , 'fkFWES' , 'fkREGV' # covered\r\n ,'fkRART', 'fkRARTPG' # covered\r\n\r\n # uncovered\r\n\r\n ,'fkDPRG', 'fkGVWK', 'fkKOMP', 'fkMREG', 'fkOBEH', 'fkPREG' \r\n \r\n \r\n # all covered\r\n\r\n ,'fkLFKT', 'fkPHI1', 'fkPUMD', 'fkPVAR', 'fkQVAR', 'fkSWVT', 'fkTEVT', 'fkWEVT' \r\n\r\n # Results:\r\n \r\n , 'Chk'\r\n\r\n # 0: kein Stellobjekt\r\n # 1: Ok: genau 1 Stellobjekt\r\n # >1: Ergebnisspalten dieses Views sind nicht konsistent befüllt\r\n\r\n , 'ik_Chk' \r\n # None, wenn RSTN-Stellobjekt keines der behandelten VBELs\r\n # sonst Anzahl der hinterlegten behandelten VBEL-Referenzen\r\n # davon ist nur 1 stellend aktiv\r\n # dieses sollte mit den nachfolgenden Spalten korrekt angezeigt sein\r\n \r\n , 'OBJTYPE'\r\n , 'NAME_i'\r\n , 'NAME_k'\r\n , 'CONT_i' \r\n \r\n #, 'OBJTYPE_PGRP', 'OBJID_PGRP', 'OBJTYPE_PUMP', 'OBJID_PUMP', 'OBJTYPE_VENT', 'OBJID_VENT', 'OBJTYPE_FWES', 'OBJID_FWES'\r\n \r\n ,'TABL_Chk' \r\n # None, wenn RSTN-Stellobjekt keines der behandelten TABLs\r\n # sonst Anzahl der hinterlegten behandelten TABL-Referenzen\r\n # davon ist nur 1 stellend aktiv\r\n # diese sollte in der nachfolgenden Spalte korrekt angezeigt sein\r\n\r\n #, 'pk_LFKT', 'pk_PHI1', 'pk_PUMD', 'pk_PVAR', 'pk_QVAR', 'pk_SWVT', 'pk_TEVT', 'pk_WEVT' \r\n ,'TABL' # befuellt wenn eine TABL Stellglied\r\n\r\n #, 'pk_Kn\r\n , 'KNOT' # befuellt wenn KNOT Stellglied\r\n \r\n #, 'pk_Ra'\r\n , 'RART' # befuellt wenn RART Stellglied\r\n , 'RART_TYP'\r\n\r\n ,'RARTPG' # befuellt wenn PGRP RART Stellglied\r\n ,'RARTPG_TYP'\r\n\r\n ,'RARTRV' # befuellt wenn PGRP RART Stellglied\r\n ,'RARTRV_TYP'\r\n \r\n #| 1510=RCPL_ROWT_AKT | 1511=RCPL_ROWT_SW' \r\n ,'RCPL' # befuellt wenn RCPL Stellglied\r\n ,'RCPL_KNOT1'\r\n ,'RCPL_KNOT2'\r\n\r\n #704=PGRP_PUAKT | 
705=PGRP_PUDEA \r\n , 'NAME_i_PUMP'\r\n , 'NAME_k_PUMP' \r\n\r\n ]]\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n if isinstance(vRSTN,pd.core.frame.DataFrame):\r\n logger.error(logStrFinal) \r\n else:\r\n logger.debug(logStrFinal) \r\n vRSTN=pd.DataFrame() \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.')) \r\n return vRSTN",
"def query(mdx_stmt):",
"def calculate_current_view(q_view, index_name):\n determine_periodicity = False\n info = {}\n result = None\n\n if isinstance(q_view, str) and\\\n q_view.upper().startswith(\"SELECT\"):\n q_table = calc_table_name(q_view, sql_type.SELECT)\n info[\"table\"] = q_table\n info[\"index_name\"] = index_name\n\n result = sql_query_base(info, q_str=q_view)\n\n elif isinstance(q_view, str) and\\\n not q_view.startswith(\"SELECT\"):\n info[\"table\"] = q_view\n q_str = \" \".join([\"SELECT * FROM\", q_view, \";\"])\n info[\"index_name\"] = index_name\n\n result = sql_query_base(info, q_str=q_str)\n\n elif isinstance(q_view, dict) and\\\n \"query\" in q_view.keys() and\\\n q_view[\"query\"].upper().startswith(\"SELECT\"):\n q_table = calc_table_name(q_view[\"query\"], sql_type.SELECT)\n info[\"table\"] = q_table\n info[\"index_name\"] = index_name\n\n if \"vars\" in q_view.keys():\n info[\"vars\"] = q_view[\"vars\"].copy()\n\n result = sql_query_base(info, q_str=q_view[\"query\"])\n\n\n elif isinstance(q_view, dict) and\\\n \"procedure\" in q_view.keys():\n info[\"table\"] = q_view[\"procedure\"]\n info[\"procedure\"] = q_view[\"procedure\"]\n info[\"index_name\"] = index_name\n info[\"q_type_ind\"] = sql_type.STORED_PROCEDURE_RES\n\n if \"vars\" in q_view.keys():\n info[\"vars\"] = q_view[\"vars\"].copy()\n\n result = sql_query_base(info, q_str=\"CALL\")\n\n if \"location\" in q_view.keys():\n determine_periodicity = True\n else:\n print(info)\n raise ValueError(\"Failed Run: calculate_current_view!!!\")\n\n return result, determine_periodicity",
"def grr_osqueryi(line: Text) -> pd.DataFrame:\n args = grr_osqueryi.parser.parse_args(shlex.split(line))\n return magics_impl.grr_osqueryi_impl(args.sql)",
"def view_my_consultation(self, sid):\n query = \"Select cid, time, date FROM consultation WHERE sid = %s \"\n inputs = (sid, )\n return self.database_manager.execute_query(query, inputs)",
"def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)",
"def test_fortran_frontend_view_test_2():\n test_name = \"view2_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,c,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,cc,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),cc(:,:,j))\nk=2\n call viewlens(aa(:,:,k),bb(:,:,k),cc(:,:,k))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n c = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, cc=c, n=10)\n assert (c[0, 0, 0] == 43)\n assert (c[1, 1, 1] == 84)",
"def generate_psql_views(self, schema, schema_name_v1, schema_name_v2, psql_views_path):\n psql_views = open(psql_views_path, 'w')\n psql_views.write(\"SET client_min_messages TO ERROR;\\n\")\n psql_views.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\\n\" % schema_name_v1)\n psql_views.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\\n\" % schema_name_v1)\n\n for table_name_v1, table_attr in schema['tables'].iteritems():\n table_name_v2 = table_attr['name']\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n\n columns = merge_dicts(columns_pri, columns_ref, columns)\n\n columns_v2 = [ '\"'+col_attr['name']+'\"' for col_name_v1, col_attr in columns.iteritems() ]\n columns_v2 += [ 'NULL' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n columns_v1 = [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns.iteritems()]\n columns_v1 += [ '\"'+col_name_v1+'\"' for col_name_v1, col_attr in columns_ignore.iteritems() ]\n\n view_sql = ('CREATE VIEW %s (%s) AS \\n SELECT %s FROM %s WITH CASCADED CHECK OPTION;\\n\\n' % (\n \"%s.%s\" % (schema_name_v1, table_name_v1),\n ', '.join(columns_v1),\n ', '.join(columns_v2),\n \"%s.%s\" % (schema_name_v2, table_name_v2)\n ))\n\n psql_views.write(view_sql + \"\\n\")\n psql_views.close()",
"def junos_cve_query(version):\n pass",
"def view_restrns_date(self):\n \n for count,restrn in enumerate(self.final_dataframe.keys()):\n \n if count in (2,3,4,5):\n \n \"\"\"\n 2 == FTNT, 3 == FARERULE, 4 == ALTRULE, 5 == GENRULE\n \"\"\"\n \n # View columns - pick only those are applicable to FTNT,FR, AGR and GR\n cols = list(self.final_dataframe['View'].loc[:,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']) + list(self.final_dataframe['View'].loc[:,'RESTRICTION_SRC':'UNAVAIL'])\n \n # Prepare New dataframe for each restriction in displayed in view\n self.view_dict[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,cols]\n \n # Reset index for newly created dataframe\n self.view_dict[restrn].reset_index(drop =True,inplace =True)\n \n # Drop duplicate from newly created dataframe if any\n self.view_dict[restrn].drop_duplicates(inplace = True )\n \n # capture load and expire trans/date for each restriction & keep in restriction date dataframe\n self.view_restrn_date[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n \n # Reset index for newly created dataframe\n self.view_restrn_date[restrn].reset_index(drop =True,inplace =True)\n \n #print(self.view_restrn_date[restrn])\n \n elif count == 6:\n \n \"\"\"\"\n Repeat above step for \"No Key Found\" if any\n \"\"\"\n self.view_dict['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_SRC':'UNAVAIL']\n self.view_dict['No Key Found'].reset_index(drop =True,inplace =True)\n self.view_restrn_date['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n #self.view_restrn_date['No Key Found'].set_index(keys = ['RESTRICTION_KEY'],drop =True,inplace =True)",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):",
"def computeStatementOperation(self, trace_collection):"
]
| [
"0.6881051",
"0.5535295",
"0.52554256",
"0.50420815",
"0.499866",
"0.49667323",
"0.49193373",
"0.48272485",
"0.48237696",
"0.48200497",
"0.47984976",
"0.4763116",
"0.4759599",
"0.47587875",
"0.47584245",
"0.47531343",
"0.47507235",
"0.4746896",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352",
"0.47406352"
]
| 0.7035219 | 0 |
Return a thread-specific storage dictionary. | def _dic(self, thread_id=None): # def _dic(_get_ident=thread.get_ident) make thread.get_ident a local var
if thread_id is None:
thread_id = thread.get_ident() # Identify the calling thread.
try:
return _ThDict._tss[thread_id]
except KeyError:
tss = _ThDict._tss[thread_id]={}
return tss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSharedDict(self):\n return self._sharedDict",
"def get_storage_model(self) -> Dict[str, Any]:\n return self.data.dict()",
"def get_stored_dict(self):\n return dict([(k, getattr(self, k)) for k in Context.STORED_KEYS])",
"def get_storage(self):\n return self.storage",
"def patch_webpy():\n # monkey-patch web.ThreadedDict\n def getd(self):\n local = IOLoop.instance().get_current_thread().get_local()\n if self not in local:\n local[self] = web.storage()\n return local[self]\n \n web.threadeddict._getd = getd",
"def __enter__(self):\n return self._get_storage().__enter__()",
"def storage(self):\n return self._storage",
"def storage(self):\n return workflow_ctx.internal.handler.storage",
"def __init__(self):\r\n self.__storage = {}",
"def get_global_storage(self, name: str) -> Any:\n return self.global_storage[name]",
"def _get_persistent_dict(filename=DBM_FILE):\n from wikipediabase.persistentkv import PersistentDict\n\n return _context_get(filename, 'peristent_store', PersistentDict)",
"def _store(self):\n store_dict = {}\n store_dict.update(self._data)\n return store_dict",
"def storage_factory():\n return storage(transaction.manager, **kwargs)",
"def get_storage(dictionary, index=None):\n # Use function name as storage name\n frame = inspect.currentframe()\n storage_name = inspect.getframeinfo(frame.f_back).function\n storage = dictionary.get(storage_name)\n if index is None:\n return storage\n elif storage is not None:\n # Return directly the indexed object\n try:\n return storage[index]\n except KeyError:\n pass",
"def _storage(self):\n annotations = IAnnotations(self.portal)\n # create the logbook storage\n if annotations.get(STORAGE_KEY) is None:\n annotations[STORAGE_KEY] = OOBTree()\n return annotations[STORAGE_KEY]",
"def ll_store_thread(thread):\n pass# TODO",
"def get_store(datafile=None):\n global _store\n if _store is None:\n _store = LocalStore(datafile=datafile)\n return _store",
"def _get_storage(self, key):\n return self._Storage(self, key)",
"def __getstate__(self) -> dict:\n self_dict = self.__dict__.copy()\n # we remove large objects from `__getstate__` to allow\n # pickling for `multiprocessing.Pool` workers without\n # high memory overhead\n del self_dict['adata']\n del self_dict['vadata']\n return self_dict",
"def storage(self) -> storage.Storage:\n raise ValueError('Not implemented.')",
"def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data",
"def dict(self):\n return self.store",
"def __getstate__(self):\n s = time.time()\n self_dict = self.__dict__.copy()\n del self_dict['pool']\n # print('_GETSTATE UTIL', time.time()-s)\n return self_dict",
"def storage_metadata(self):\n return self._storage_metadata",
"def init_dictstorage_driver(nodes, edges, data):\n\n node_storage = DictStorage(nodes)\n edge_storage = DictStorage(edges)\n data_storage = DictStorage(data)\n adjacency_storage = AdjacencyView(node_storage, edge_storage)\n\n return node_storage, edge_storage, adjacency_storage, data_storage",
"def StorageForPeerOnOurMachine(self,peer):\n\n peerRecord = self.peerDatabase[peer]\n return peerRecord.localStorage",
"def _localWhatDoINeed(self):\n needDict = super()._localWhatDoINeed()\n\n return needDict",
"def __init__(self, name='TemporaryStorage'):\n\n BaseStorage.__init__(self, name)\n\n self._index = {}\n self._referenceCount = {}\n self._oreferences = {}\n self._opickle = {}\n self._tmp = []\n self._conflict_cache = {}\n self._last_cache_gc = 0\n self._recently_gc_oids = [None for x in range(RECENTLY_GC_OIDS_LEN)]\n self._oid = z64\n self._ltid = z64\n\n # Alow overrides for testing.\n self._conflict_cache_gcevery = CONFLICT_CACHE_GCEVERY\n self._conflict_cache_maxage = CONFLICT_CACHE_MAXAGE",
"def storage(self, **kwargs):\n self.logger.debug(f\"Get basic storage data\")\n url_path = 'storage'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)",
"def __getSettingsFromStorage():\n return AccountSettings.getSettings(NEW_SETTINGS_COUNTER)"
]
| [
"0.6215964",
"0.61273575",
"0.6037959",
"0.6016725",
"0.5994203",
"0.5977398",
"0.5924003",
"0.58914196",
"0.58765364",
"0.58751744",
"0.57997715",
"0.57680607",
"0.568655",
"0.563845",
"0.56246895",
"0.5611149",
"0.55896187",
"0.5578572",
"0.5547232",
"0.55118096",
"0.5485658",
"0.53952354",
"0.5368654",
"0.5363582",
"0.5357003",
"0.53496104",
"0.5337172",
"0.53172934",
"0.52994806",
"0.5298074"
]
| 0.69267035 | 0 |
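The `_dic` row above keys a class-level dict on the calling thread's identifier so each thread gets a private dictionary. A minimal Python 3 sketch of the same idea (the `ThreadDict` name and the lock are illustrative additions; the row's own code targets Python 2's `thread` module, and `threading.local()` is the standard-library shortcut for this pattern):

```python
# Sketch only: per-thread storage keyed by thread id, mirroring the _dic row above.
import threading

class ThreadDict:                      # illustrative name, not from the dataset
    _tss = {}                          # thread id -> that thread's private dict
    _lock = threading.Lock()

    def _dic(self, thread_id=None):
        if thread_id is None:
            thread_id = threading.get_ident()      # identify the calling thread
        with self._lock:                           # guard concurrent first access
            return ThreadDict._tss.setdefault(thread_id, {})

td = ThreadDict()
td._dic()["answer"] = 42               # each thread sees only its own dict
assert td._dic()["answer"] == 42
```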
Clear the named lazyprop from this object | def clear_lazyprop(object, property_name):
assert isinstance(property_name, str)
if _LAZY_PROP_VALUES in object.__dict__:
if property_name in object.__dict__[_LAZY_PROP_VALUES]:
del object.__dict__[_LAZY_PROP_VALUES][property_name]
if _LAZY_PROP_SUBSCRIBERS in object.__dict__:
if property_name in object.__dict__[_LAZY_PROP_SUBSCRIBERS]:
for fn in object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name]:
fn(object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clearProperty(*args):",
"def clearProperty(*args):",
"def clearProperty(*args):",
"def clearProperty(*args):",
"def _del(self) -> None:\n self.variables.pop(prop_name, None)",
"def _reset_derived_prop_(self):\n self._derived_properties[\"photosamplers\"] = None",
"def reset(self):\r\n instdict = self.__dict__\r\n classdict = self.__class__.__dict__\r\n # To reset them, we simply remove them from the instance dict. At that\r\n # point, it's as if they had never been computed. On the next access,\r\n # the accessor function from the parent class will be called, simply\r\n # because that's how the python descriptor protocol works.\r\n for mname, mval in classdict.items():\r\n if mname in instdict and isinstance(mval, OneTimeProperty):\r\n delattr(self, mname)",
"def clear(self, attrname):\n self.__dict__['_'+attrname] = False",
"def clear_all_lazyprops(object):\n if _LAZY_PROP_VALUES in object.__dict__:\n del object.__dict__[_LAZY_PROP_VALUES]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n for subscribers in object.__dict__[_LAZY_PROP_SUBSCRIBERS].values():\n for fn in subscribers:\n fn(object)",
"def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None",
"def unsubscribe_from_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].remove(on_change_func)",
"def clear_properties(self):\n self.properties.clear()",
"def remove_property(self, name):\n if (not name in self.properties):\n return\n del self.properties[name]",
"def clear_lazyprop_on_lazyprop_cleared(subscriber_object, subscriber_lazyprop,\n listen_to_object, listen_to_lazyprop=None):\n if listen_to_lazyprop is None:\n listen_to_lazyprop = subscriber_lazyprop\n\n assert isinstance(listen_to_lazyprop, str)\n assert isinstance(subscriber_lazyprop, str)\n\n subscribe_to_lazy_prop(listen_to_object, listen_to_lazyprop,\n lambda _: clear_lazyprop(subscriber_object, subscriber_lazyprop))",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def clear(self, name):\n pass",
"def reset(self):\n self.valid_passes = set()\n self.property_set.clear()",
"def __delattr__(self, name):\n self.unset(name)",
"def unsetProp(self, name):\n ret = libxml2mod.xmlUnsetProp(self._o, name)\n return ret",
"def clear(self):\n self.__dict__.clear()",
"def freeProp(self):\n libxml2mod.xmlFreeProp(self._o)",
"def unset(self) -> None:\n self.val = None\n self.notes = []",
"def reset_property(self, _, prop):\n dst = prop.get_merged_equivalent().clone()\n create_pseudo_values([dst])\n cmd = commands.ReplaceObject(obj=prop, repl=dst)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.reset_value_view(None)",
"def __delattr__(self, name):\r\n # The __delattr__ of the parent class should always be called so that the attribute actually gets removed.\r\n val = getattr(self, name)\r\n if type(val) == Constraint:\r\n self._remove_constraint(val)\r\n val.name = 'None'\r\n elif type(val) == ConstraintDict():\r\n val.name = 'None'\r\n val._model = None\r\n for k, v in val.items():\r\n self._remove_constraint(v)\r\n elif type(val) in {Var, Param, VarDict, ParamDict}:\r\n val.name = 'None'\r\n\r\n super(Model, self).__delattr__(name)",
"def propdel(self, name):\r\n res = self._svn('propdel', name)\r\n return res[:-1] # strip trailing newline\r",
"def __delattr__(self, name):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delattr__')(name)\n for x in self:\n x.__delattr__(name)\n return self",
"def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)"
]
| [
"0.6905533",
"0.6905533",
"0.6905533",
"0.6905533",
"0.6871976",
"0.6779333",
"0.6716801",
"0.66273415",
"0.65594524",
"0.6543166",
"0.65126073",
"0.6447588",
"0.6352423",
"0.63402987",
"0.62901765",
"0.62901765",
"0.62901765",
"0.62901765",
"0.6161432",
"0.61306316",
"0.6094376",
"0.6062388",
"0.59777683",
"0.59769064",
"0.5935158",
"0.5933446",
"0.593102",
"0.5923801",
"0.5919311",
"0.59118754"
]
| 0.77106565 | 0 |
Clears all lazyprops from an object. This means they will be re-evaluated the next time they are accessed. | def clear_all_lazyprops(object):
if _LAZY_PROP_VALUES in object.__dict__:
del object.__dict__[_LAZY_PROP_VALUES]
if _LAZY_PROP_SUBSCRIBERS in object.__dict__:
for subscribers in object.__dict__[_LAZY_PROP_SUBSCRIBERS].values():
for fn in subscribers:
fn(object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_lazyprop(object, property_name):\n assert isinstance(property_name, str)\n\n if _LAZY_PROP_VALUES in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_VALUES]:\n del object.__dict__[_LAZY_PROP_VALUES][property_name]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_SUBSCRIBERS]:\n for fn in object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name]:\n fn(object)",
"def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None",
"def clear_properties(self):\n self.properties.clear()",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def clearProperties(*args):",
"def reset(self):\r\n instdict = self.__dict__\r\n classdict = self.__class__.__dict__\r\n # To reset them, we simply remove them from the instance dict. At that\r\n # point, it's as if they had never been computed. On the next access,\r\n # the accessor function from the parent class will be called, simply\r\n # because that's how the python descriptor protocol works.\r\n for mname, mval in classdict.items():\r\n if mname in instdict and isinstance(mval, OneTimeProperty):\r\n delattr(self, mname)",
"def clearProperty(*args):",
"def clearProperty(*args):",
"def clearProperty(*args):",
"def clearProperty(*args):",
"def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)",
"def _reset_derived_prop_(self):\n self._derived_properties[\"photosamplers\"] = None",
"def clear(self):\n self.__dict__.clear()",
"def reset_properties(self):\n self.__elements_count = 0\n self.__elements = {}",
"def clear(self) :\n self.__dict__ = {}",
"def clear_lazyprop_on_lazyprop_cleared(subscriber_object, subscriber_lazyprop,\n listen_to_object, listen_to_lazyprop=None):\n if listen_to_lazyprop is None:\n listen_to_lazyprop = subscriber_lazyprop\n\n assert isinstance(listen_to_lazyprop, str)\n assert isinstance(subscriber_lazyprop, str)\n\n subscribe_to_lazy_prop(listen_to_object, listen_to_lazyprop,\n lambda _: clear_lazyprop(subscriber_object, subscriber_lazyprop))",
"def reset(self):\n self.values = None\n self.keys = None\n self.mask = None",
"def clear(self) -> None:\n self.objects = []",
"def reset(self):\n self.valid_passes = set()\n self.property_set.clear()",
"def _reset(self):\n [delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]",
"def full_clear(self):\n self.clear()\n self.class_hooks.clear()",
"def reset(self):\n # type: () -> None\n self._all_scalar_oids = []\n self._use_scalar_oids_cache = False",
"def clear(self):\n self._post_init()",
"def reset_property(self, _, prop):\n dst = prop.get_merged_equivalent().clone()\n create_pseudo_values([dst])\n cmd = commands.ReplaceObject(obj=prop, repl=dst)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.reset_value_view(None)",
"def clear(self):\r\n\t\tself.free_objects[:] = []",
"def reset(self):\n self._setupObjects()",
"def reset(self):\n\n self.simple_cache = {}\n self.complex_cache = {}\n self.target_cache = {}",
"def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()"
]
| [
"0.7548748",
"0.7294389",
"0.7102218",
"0.70832473",
"0.70832473",
"0.70832473",
"0.70832473",
"0.7073602",
"0.70369637",
"0.70369637",
"0.70369637",
"0.70369637",
"0.6780632",
"0.67750883",
"0.6658477",
"0.66337144",
"0.6555479",
"0.64801943",
"0.6456167",
"0.6351214",
"0.6333118",
"0.6284741",
"0.62756467",
"0.62203723",
"0.6217804",
"0.61997694",
"0.6180864",
"0.6162302",
"0.6159061",
"0.6148739"
]
| 0.8140209 | 0 |
Clear the lazyprop on the subscriber_object if the listen_to_object property is cleared | def clear_lazyprop_on_lazyprop_cleared(subscriber_object, subscriber_lazyprop,
listen_to_object, listen_to_lazyprop=None):
if listen_to_lazyprop is None:
listen_to_lazyprop = subscriber_lazyprop
assert isinstance(listen_to_lazyprop, str)
assert isinstance(subscriber_lazyprop, str)
subscribe_to_lazy_prop(listen_to_object, listen_to_lazyprop,
lambda _: clear_lazyprop(subscriber_object, subscriber_lazyprop)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_all_lazyprops(object):\n if _LAZY_PROP_VALUES in object.__dict__:\n del object.__dict__[_LAZY_PROP_VALUES]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n for subscribers in object.__dict__[_LAZY_PROP_SUBSCRIBERS].values():\n for fn in subscribers:\n fn(object)",
"def unsubscribe_from_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].remove(on_change_func)",
"def clear_lazyprop(object, property_name):\n assert isinstance(property_name, str)\n\n if _LAZY_PROP_VALUES in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_VALUES]:\n del object.__dict__[_LAZY_PROP_VALUES][property_name]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_SUBSCRIBERS]:\n for fn in object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name]:\n fn(object)",
"def _clear_hasevents_refs(self, ob):\n for connection in self._connections:\n for i in reversed(range(len(connection.objects))):\n if connection.objects[i][0] is ob:\n connection.objects.pop(i)\n \n # Do not clear pending events. This handler is assumed to continue\n # working, and should thus handle its pending events at some point,\n # at which point it cannot hold any references to ob anymore.",
"def __del__(self):\n self.unsubscribe()",
"def __init__(self, obj):\n self._store = {}\n self.obj = weakref.proxy(obj)",
"def fix_subscriber(subscriber):\n \n if subscriber is None:\n subscriber = Disposable.empty()\n elif type(subscriber) == types.FunctionType:\n subscriber = Disposable(subscriber)\n\n return subscriber",
"def unsubscribe(self):\n pass # pragma: no cover",
"def clean_up(self, observable):\n pass",
"def unsubscribe(observer):",
"def unsubscribe(observer):",
"def event_subscribe(self, obj_ref):\n self.subscribers.append(obj_ref)",
"def __init__(self):\n self.event_list = []\n self._subscribers = set()",
"def unsubscribe(self, event_handler):\n pass # pragma: no cover",
"def __init__(self, obj):\r\n self._obj = obj\r\n obj.notifyOnDeath(self._selfDied)\r\n\r\n self._cbs = set()",
"def subscribe_to_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if not hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n setattr(object, _LAZY_PROP_SUBSCRIBERS, defaultdict(lambda: set()))\n\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].add(on_change_func)",
"def clear(self):\n self.events={}",
"def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def clear_message_listener(self):\n self.callback_message.clear()",
"def unsubscribeFromEvent(eventName,subscriber):",
"def clear(self):\n self.__hooks = odict()",
"def _unsubscribe(self):\n self.unsubscribe_date = now()\n self.unsubscribed = True\n self.subscribed = False",
"async def unlistened(self, value=None):\n pass",
"def unlisten(obj, name, func):\n _signals(obj, name).remove(func)",
"def __init__(self, subscribe):\n def _subscribe(observer):\n def fix_subscriber(subscriber):\n \"\"\"Fix subscriber to check for None or function returned to \n decorate as Disposable\"\"\"\n \n if subscriber is None:\n subscriber = Disposable.empty()\n elif type(subscriber) == types.FunctionType:\n subscriber = Disposable(subscriber)\n\n return subscriber\n\n def set_disposable(scheduler=None, value=None):\n try:\n auto_detach_observer.disposable = fix_subscriber(subscribe(auto_detach_observer))\n except Exception as ex:\n if not auto_detach_observer.fail(ex):\n raise ex\n\n auto_detach_observer = AutoDetachObserver(observer)\n\n if current_thread_scheduler.schedule_required():\n current_thread_scheduler.schedule(set_disposable)\n else:\n set_disposable()\n\n return auto_detach_observer\n \n super(AnonymousObservable, self).__init__(_subscribe)"
]
| [
"0.6362107",
"0.63212144",
"0.6137851",
"0.58720046",
"0.5845626",
"0.58111215",
"0.5734059",
"0.5656716",
"0.5642605",
"0.56112516",
"0.56112516",
"0.5497448",
"0.5417932",
"0.5414736",
"0.53566337",
"0.53334963",
"0.5315492",
"0.53083503",
"0.5284902",
"0.5284902",
"0.5284902",
"0.5284902",
"0.5284902",
"0.5267844",
"0.52657145",
"0.52462715",
"0.52437997",
"0.52154815",
"0.52123106",
"0.52072304"
]
| 0.85002965 | 0 |
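The three lazyprop rows ending here all operate on the same two per-instance dicts: one holding cached values (`_LAZY_PROP_VALUES`) and one holding invalidation callbacks (`_LAZY_PROP_SUBSCRIBERS`). A hedged sketch of the surrounding pattern (the decorator itself is not included in these rows, and every name below is illustrative rather than the library's actual API):

```python
# Sketch of the cached-property-with-invalidation pattern assumed by the rows above.
from collections import defaultdict

_VALUES = "_lazyprop_values"            # stand-ins for _LAZY_PROP_VALUES /
_SUBSCRIBERS = "_lazyprop_subscribers"  # _LAZY_PROP_SUBSCRIBERS in the rows

def lazyprop(fn):
    name = fn.__name__
    @property
    def wrapper(self):
        cache = self.__dict__.setdefault(_VALUES, {})
        if name not in cache:
            cache[name] = fn(self)      # compute once, reuse until cleared
        return cache[name]
    return wrapper

def clear_lazyprop(obj, name):
    obj.__dict__.get(_VALUES, {}).pop(name, None)
    for cb in obj.__dict__.get(_SUBSCRIBERS, {}).get(name, ()):
        cb(obj)                         # let dependents clear their own caches

def subscribe_to_lazy_prop(obj, name, cb):
    obj.__dict__.setdefault(_SUBSCRIBERS, defaultdict(set))[name].add(cb)

class Circle:
    def __init__(self, r):
        self.r = r
    @lazyprop
    def area(self):
        return 3.14159 * self.r ** 2

c = Circle(2.0)
print(c.area)                           # computed and cached
clear_lazyprop(c, "area")               # cache dropped; next access recomputes
```

Clearing a property both drops its cached value and notifies subscribers, which is what lets the chained helper in the last row propagate invalidation from one object's lazyprop to another's.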
Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word and averages these vectors into a single vector encoding the meaning of the sentence. | def sentence_to_avg(sentence, word_to_vec_map):
# Get a valid word contained in the word_to_vec_map.
any_word = list(word_to_vec_map.keys())[0]
### START CODE HERE ###
# Step 1: Split sentence into list of lower case words (≈ 1 line)
words = sentence.lower().split()
# Initialize the average word vector, should have the same shape as your word vectors.
avg = np.zeros(word_to_vec_map[any_word].shape)
# Initialize count to 0
count = 0
# Step 2: average the word vectors. You can loop over the words in the list "words".
for w in words:
# Check that word exists in word_to_vec_map
if w in word_to_vec_map:
avg += word_to_vec_map[w]
# Increment count
count +=1
if count > 0:
# Get the average. But only if count > 0
avg = avg / count
### END CODE HERE ###
return avg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sentence_mean_vec(sentence):\n sentence_vecs = numpy.array([])\n \n sent1 = nltk.word_tokenize(sentence)\n for w in sent1: \n w = w.strip(\"'?.,- \")\n if not w in stop_words and w.lower() in glove_model:\n word_vec = numpy.array([glove_model[w.lower()]])\n if sentence_vecs.shape[0] == 0: # Initialize sentence vectors\n sentence_vecs = word_vec\n else:\n sentence_vecs = numpy.vstack((sentence_vecs, word_vec))\n # print(sentence_vecs.shape)\n if sentence_vecs.shape[0] == 0:\n return None\n elif sentence_vecs.shape == (300,):\n return numpy.expand_dims(sentence_vecs, axis=0)\n return numpy.mean(sentence_vecs, axis=0)",
"def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def word2vec(self, sentence: str):\n tokens = nltk.word_tokenize(sentence)\n v = [self.word_dict.get(token, 0) for token in tokens]\n return v",
"def process_sentence(sentence: str) -> list:\r\n return [process_word(word) for word in sentence.split()][:-1]",
"def avg_sentence_vector(words, model, num_features, index2word_set):\n featureVec = np.zeros((num_features,), dtype=\"float32\")\n nwords = 0\n\n for word in words:\n if word in index2word_set:\n nwords = nwords+1\n featureVec = np.add(featureVec, model.wv[word])\n # featureVec = np.add(featureVec, model.wv.__getitem__(word))\n\n if nwords>0:\n featureVec = np.divide(featureVec, nwords)\n return featureVec",
"def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words",
"def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def sentence_to_words(sentence: str) -> List[str]:\n return list(map(clean_word, sentence.split(\" \")))",
"def vectorize_sentence(sentence, model):\n final_vec = np.zeros(300, )\n count = 0\n for word in sentence:\n count += 1\n dummy_vec = np.zeros(300, )\n try:\n temp_vec = get_vector(word, model)\n final_vec += temp_vec\n except:\n final_vec += dummy_vec\n return final_vec / count",
"def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords",
"def tokenizer(sentence):\n words = []\n for phrase in sentence.split('.'):\n for piece in phrase.split(','):\n for word in piece.split(' '):\n words.append(word)\n return words",
"def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])",
"def parse_sentence(self, text):\n\n if text is None:\n return []\n text_tokens = word_tokenize(text)\n text_tokens_without_stopwords = []\n # text_lower_tokens_without_stopwords = [w.lower() for w in text_tokens if w not in self.stop_words]\n\n # remove stopwords\n for w in text_tokens:\n if w.lower() not in self.stop_words_dict:\n text_tokens_without_stopwords.append(w)\n\n # parsing\n doc_length = len(text_tokens_without_stopwords)\n num_dict = {\"thousand\": \"K\", \"million\": \"M\", \"billion\": \"B\", \"dollar\": \"$\", \"dollars\": \"$\", \"bucks\":\"$\", \"percent\": \"%\",\n \"$\": \"$\", \"%\": \"%\",\n \"percentage\": \"%\"}\n\n similar_words_dict = {\"corona\":\"covid\", \"covid19\":\"covid\", \"coronavirus\":\"covid\", \"covid-19\":\"covid\", \"covid\": \"covid\",\"#covid\": \"covid\", \"#covid19\": \"covid\"}\n\n new_tokenized_text = []\n i = -1\n # for i in range(doc_length):\n while i < doc_length - 1:\n # please note: when we do i += 1 it is because next_term(old_token[i + 1]) is used already so we skip over it next iteration\n # so we dont go over it twice\n\n i += 1\n term = text_tokens_without_stopwords[i]\n\n term = term.encode(\"ascii\", \"ignore\").decode() # remove ascii\n if term.lower() in similar_words_dict:\n new_tokenized_text.append(similar_words_dict[term.lower()])\n continue\n next_term = None\n if term.startswith(\"//t\") or (term.isalpha() and len(term) == 1): # remove short urls and terms that are single letters\n continue\n if term.__contains__(\"-\"):\n new_tokenized_text.extend(term.split(\"-\"))\n if i + 1 < doc_length:\n next_term = text_tokens_without_stopwords[i + 1]\n if term is \"@\" and next_term is not None:\n i += 2 # removing @ and name\n continue\n if term is \"#\" and next_term is not None:\n new_tokenized_text.extend(self.handle_hashtag(next_term))\n i += 1\n elif term is \"$\" and next_term is not None and str.isdigit(\n next_term.replace(\",\", \"\")): # $100 thousand / $75 --> 100K$ / 75$\n num = self.handle_numbers(next_term)\n if i + 2 < doc_length and text_tokens_without_stopwords[i + 2] in num_dict:\n num = num + num_dict[text_tokens_without_stopwords[i + 2]]\n i += 1\n new_tokenized_text.append(num + \"$\")\n i += 1\n elif str.isdigit(term.replace(\",\", \"\")): # if term is a number\n # deal with decimal number like 10.1234567 -> 10.123\n num = self.handle_numbers(term)\n if next_term is not None and next_term.lower() in num_dict:\n new_tokenized_text.append(num + num_dict[next_term.lower()])\n i += 1\n else:\n new_tokenized_text.append(num)\n elif not term.isidentifier(): # identifier: (a-z) and (0-9), or underscores (_)\n emojis_removed = self.remove_emojis(term)\n if emojis_removed is not \"\":\n new_tokenized_text.append(emojis_removed)\n else:\n new_tokenized_text.append(term.lower())\n\n return new_tokenized_text",
"def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec",
"def getWords(speech):\r\n return speech.split()",
"def get_sentence_average_w2v(sent, word_to_vec, embedding_dim):\n sum_vec = np.zeros((embedding_dim,))\n known_tokens = 0\n for token in sent.text:\n if (token in word_to_vec.dict):\n known_tokens += 1\n sum_vec += word_to_vec[token]\n if (known_tokens != 0):\n return sum_vec / known_tokens\n else:\n return sum_vec",
"def _WordSimAveVec(self,df,a):\r\n #Obtain the course description for the course provided and convert the string into a list of individual words.\r\n Description = df['description'][a].split()\r\n #Create a placeholder zero vector of the same size as the vector embedding.\r\n Vector = np.zeros(self.WordVecModel.layer1_size)\r\n wordCount = 0\r\n #Iterate over each word in the description.\r\n for word in Description:\r\n #If the word is in the trained vocabulary, obtain the word vector. \r\n #Continue to add the word vectors to the placeholder vector to get the running sum.\r\n if word in self.WordVecModel.wv.vocab:\r\n vector = self.WordVecModel.wv.get_vector(word)\r\n Vector +=vector\r\n #Keep track of how many word vectors (which were included in the vocabulary) were added.\r\n wordCount +=1\r\n #Calculate the mean by dividing the sum by the number of vectors.\r\n return Vector/wordCount",
"def token_to_words(sentence):\n\n regex_of_word = re.findall(SENTENCE_TO_WORDS_REGEX, sentence)\n\n words = [x.lower() for x in regex_of_word if x is not '']\n\n return words",
"def parse_sentence(sentence):\n words = Parser.get_words_from_sentence(sentence)\n\n with open(STOP_WORDS_FILE, \"r\", encoding=\"utf-8\") as file:\n stop_words = json.load(file)\n return ' '.join(word for word in words if word not in stop_words)",
"def split_into_words(sentences):\n return list(sentences.split(\" \"))",
"def _get_words_and_punctiation(sentence: str) ->List[str]:\n return sum((_separate_word_and_punctiation(word) for word in sentence.strip().split()), [])",
"def get_word_list(sentence):\n sentence = space1.sub(r'\\1 \\2', sentence)\n sentence = space2.sub(r\"\\1 \\2\", sentence)\n sentence = space3.split(sentence)\n sentence = \" \".join(sentence)\n wordlist = [i for i in sentence.split()]\n return \" \".join(wordlist)",
"def mutateSentences(sentence):\n # Build the graph\n graph = {}\n words = sentence.split()\n res = set()\n for i in range(len(words) - 1):\n w = words[i]\n if not w in graph:\n graph[w] = set()\n graph[w].add(words[i + 1])\n # Helper function that recursively find and append next word\n def helper(graph, length, tmp, res):\n if len(tmp) == length:\n res.add(' '.join(tmp))\n return\n cur = tmp[-1]\n if cur not in graph:\n return\n for word in graph[cur]:\n tmp.append(word)\n helper(graph, length, tmp, res)\n tmp.pop()\n # For each vertex in the graph, call the helper function\n for word in graph:\n helper(graph, len(words), [word], res)\n return res",
"def split_sentence(sentence):\n sentences = []\n split = nltk.sent_tokenize(sentence)\n for s in split:\n sentences.append(s)\n\n return sentences",
"def update_text(sentence, wordvec_type):\n # stanford sentiment dataset has a lot of random - and /\n sentence = sentence.replace(\"-\", \" \")\n sentence = sentence.replace(\"/\", \" \")\n sentence = sentence.split()\n # our current word vectors are all entirely lowercased\n sentence = [word.lower() for word in sentence]\n if wordvec_type == classifier_args.WVType.WORD2VEC:\n return sentence\n elif wordvec_type == classifier_args.WVType.GOOGLE:\n new_sentence = []\n for word in sentence:\n if word != '0' and word != '1':\n word = re.sub('[0-9]', '#', word)\n new_sentence.append(word)\n return new_sentence\n elif wordvec_type == classifier_args.WVType.FASTTEXT:\n return sentence\n elif wordvec_type == classifier_args.WVType.OTHER:\n return sentence\n else:\n raise ValueError(\"Unknown wordvec_type {}\".format(wordvec_type))",
"def sentence_to_vec(s, embeddings_dict, stop_words, tokenizer):\n \n words = str(s).lower()\n words = tokenizer(words)\n # remove stop words, if any, and only alpha-numeric tokens\n words = [w for w in words if not w in stop_words and w.isalpha()]\n \n embeddings = []\n for w in words:\n if w in embeddings_dict:\n embeddings.append(embeddings_dict[w])\n \n # dimensions = 300\n if len(embeddings)==0:\n return np.zeros(300)\n\n # list of embeddings to array\n embeddings = np.array(embeddings)\n\n # normalized vector\n sum = embeddings.sum(axis=0)\n return sum/np.sqrt((sum**2).sum())",
"def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding",
"def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)",
"def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)",
"def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec"
]
| [
"0.7131855",
"0.6924961",
"0.6804821",
"0.67380065",
"0.67274404",
"0.67215794",
"0.6702916",
"0.6634316",
"0.6608552",
"0.64645565",
"0.64388955",
"0.638915",
"0.6375292",
"0.62756556",
"0.62042046",
"0.61897296",
"0.618508",
"0.61756694",
"0.61643064",
"0.61166936",
"0.61163175",
"0.60673475",
"0.6055616",
"0.6048085",
"0.6025582",
"0.6025227",
"0.6004969",
"0.5987819",
"0.5987819",
"0.5972309"
]
| 0.715393 | 0 |
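A toy call of `sentence_to_avg` from the row above, assuming that function is in scope; the 3-dimensional vectors are invented stand-ins for the 50-dimensional GloVe embeddings:

```python
import numpy as np

# Hypothetical miniature word_to_vec_map (real GloVe vectors are 50-d).
word_to_vec_map = {
    "i":    np.array([0.1, 0.2, 0.3]),
    "love": np.array([0.4, 0.0, 0.6]),
    "nlp":  np.array([0.7, 0.8, 0.0]),
}

avg = sentence_to_avg("I love NLP", word_to_vec_map)
print(avg)   # element-wise mean of the three word vectors: [0.4, 0.333..., 0.3]
```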
Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences. The output shape should be such that it can be given to `Embedding()` (described in Figure 4). | def sentences_to_indices(X, word_to_index, max_len):
m = X.shape[0] # number of training examples
### START CODE HERE ###
# Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)
X_indices = np.zeros((m, max_len))
for i in range(m): # loop over training examples
# Convert the ith training sentence in lower case and split is into words. You should get a list of words.
sentence_words = X[i].lower().split()
# Initialize j to 0
j = 0
# Loop over the words of sentence_words
for w in sentence_words:
# if w exists in the word_to_index dictionary
if w in word_to_index: # if w in word_to_index.keys():
# Set the (i,j)th entry of X_indices to the index of the correct word.
X_indices[i, j] = word_to_index[w]
# Increment j to j + 1
j = j + 1
### END CODE HERE ###
return X_indices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sentences_to_indices(X, word_to_index, max_len):\n m = X.shape[0] # number of training examples\n X_indices = np.zeros((m, max_len)) # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)\n for i in range(m): # loop over training examples\n sentence_words = X[i].lower().split() # Convert the ith training sentence in lower case and split is into words. You should get a list of words.\n j = 0 # Initialize j to 0\n for w in sentence_words: # Loop over the words of sentence_words\n X_indices[i, j] = word_to_index[w] # Set the (i,j)th entry of X_indices to the index of the correct word.\n j = j + 1 # Increment j to j + 1\n return X_indices",
"def sentences_to_indices(X, word_to_index, max_len):\n \n m = X.shape[0] # number of training examples\n \n # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)\n X_indices = np.zeros( (m, max_len) )\n \n for i in range(m): # loop over training examples\n \n # Convert the ith training sentence in lower case and split it\n # into a list of words.\n sentence_words = tokenize_fr(X[i])\n \n # Initialize j to 0\n j = 0\n \n # Loop over the words of sentence_words\n for w in sentence_words:\n # Set the (i,j)th entry of X_indices to the index of the correct word.\n try :\n X_indices[i, j] = word_to_index[ w ]\n except KeyError as kErr :\n # case 'word is unknown' => assign the \"<UKN>\" token\n X_indices[i, j] = len(word_to_index)\n # Increment j to j + 1\n j = j + 1\n if j == max_len : break\n \n return X_indices",
"def convert_to_index(sentences):\n\n\twords=[]\n\tfor idx, sentence in enumerate(sentences):\n\t\tfor word, label, sid, book, bert in sentence:\n\t\t\twords.append([book, sid, word, label])\n\n\treturn words",
"def sentence_to_indices(sentence, word_dict):\n return [word_dict.to_index(word) for word in word_tokenize(sentence)]",
"def sentence_to_indices(self, sentence):\n token_list = self.tokenize(sentence)\n indices = self.tokenizer.convert_tokens_to_ids(token_list)\n\n return indices",
"def build_input_data(sentences, vocabulary):\n index_list = []\n for word in sentences:\n tmp = vocabulary[word]\n index_list.append(tmp)\n x = np.array(index_list)\n return x",
"def word_to_idx(words, word_vocab):\n\n idx = []\n\n for word in words:\n idx.append(word_vocab[word])\n \n return torch.from_numpy(np.asarray(idx))",
"def idx_sentence(sentence, word2id_dict):\r\n x = []\r\n words = sentence.split()\r\n for word in words:\r\n x.append(word2id_dict[word]) # 假设word就在word2idx_dict中.\r\n return x",
"def get_source_indices(sent, dic):\n clean_sent = cleanup_sentence(sent)\n words = clean_sent.split(' ')\n n_words = len(words) + 1 # counting for the </s>\n indices = np.zeros(n_words)\n cnt = 0\n nsrc_unk = 0\n unk_idx = dic.symbol_to_index[\"<unk>\"]\n eos_idx = dic.symbol_to_index[\"</s>\"]\n for i, word in enumerate(words):\n wid = dic.symbol_to_index.get(word, None)\n if wid is None:\n indices[cnt] = unk_idx\n nsrc_unk += 1\n else:\n indices[cnt] = wid\n if wid == unk_idx:\n nsrc_unk += 1\n cnt += 1\n indices[cnt] = eos_idx\n cnt += 1\n return indices, indices.shape[0], nsrc_unk",
"def convert_words_to_index(sentences_list, dictionary):\n return [[dictionary[word]\n if word in dictionary else 0\n for word in sentence] for sentence in sentences_list]",
"def get_features(sentences: tuple) -> np.ndarray:\n sen_embedding = [_single_sentence(st) for st in sentences]\n sen_embedding = np.array(sen_embedding)\n return sen_embedding",
"def _tokens_to_index(self,tokens):\n wids = []\n for tk in tokens:\n if tk in self.wtoi.keys():\n wids.append(self.wtoi[tk])\n else:\n wids.append(1) # <UNK>\n for _ in range(self.sentence_max_length - len(wids)):\n wids.append(0)\n if len(wids) > self.sentence_max_length:\n wids = wids[:self.sentence_max_length]\n return wids",
"def get_idx_from_sent(sent, word_idx_map):\r\n x = []\r\n words = sent.split()\r\n for word in words:\r\n if word in word_idx_map:\r\n x.append(word_idx_map[word])\r\n else:\r\n x.append(1)\r\n\r\n return x",
"def get_idx_from_sent(sent, word_idx_map):\n x = []\n words = sent.split()\n for word in words:\n if word in word_idx_map:\n x.append(word_idx_map[word])\n else:\n x.append(1)\n\n return x",
"def predict_sentences_2_idxs(self):\n fo = open(self.config.parsed_predict_file, 'w')\n self.load_dicts()\n\n questions = pd.read_csv(self.config.predict_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx[self.config.unknown_token]\n\n for quest in questions.question_text:\n tokens = preprocess_text(quest)\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n fo.write((str(\" \".join(str(num) for num in idxs)) + \"\\n\"))",
"def sents_to_tokens(sents, wordset):\n padded_sentences = ([\"<s>\", \"<s>\"] + s + [\"</s>\"] for s in sents)\n # This will canonicalize words, and replace anything not in vocab with <unk>\n return np.array([utils.canonicalize_word(w, wordset=wordset) \n for w in utils.flatten(padded_sentences)], dtype=object)",
"def sent_to_idx(self, sent):\n\n def _is_num(w):\n \"\"\" Checks if the word should be replaced by <NUM>. \"\"\"\n symbols = list(w)\n for s in symbols:\n if s in string.digits:\n return '<NUM>'\n return w\n\n # Replace numerical expressions with <NUM>\n sent_words = [_is_num(word) for word in sent.split()]\n # Index words replacing low-frequency tokens with <UNK>\n idx_list = [self.vocab.word_to_index[word] if word in self.vocab.word_to_index.keys() else self.vocab.unk_id for\n word in sent_words]\n # Optionally mark sentence boundaries (i.e. '<GO> w1, w2, ... <EOS>')\n if self.opt.mark_borders:\n idx_list = [self.vocab.go_id] + idx_list + [self.vocab.eos_id]\n return idx_list",
"def convert_words_to_index(actual_text, dictionary,length):\n output_index=[]\n for words in actual_text:\n full_sentence = [dictionary[word] if word in dictionary else 0 for word in words]\n sen_len=len(full_sentence)\n if sen_len<length: # padding\n full_sentence.extend([0]*(length-sen_len))\n else:\n full_sentence=full_sentence[:length]\n output_index.append(full_sentence)\n return output_index",
"def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)",
"def get_corpus_indices(corpusfname, word_to_idx, context_size):\n eos_idx = word_to_idx[EOS_SYMBOL]\n start_padding = [eos_idx] * context_size\n sentences = []\n with open(corpusfname) as corpus:\n for line in corpus:\n sentences.append(start_padding + [word_to_idx.get(word, word_to_idx[\"<unk>\"]) for word in line.strip().split()] + [eos_idx])\n return sentences",
"def buildTrainingSequences(voc, maxlen=50, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n next_syms = []\n \n syms = set(text) # unique symbols (chars or words)\n \n for i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_syms.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\n \n X = np.zeros((len(sentences), maxlen), dtype=np.int)\n y = np.zeros((len(sentences), len(syms)), dtype=np.bool)\n\n for i, sentence in enumerate(sentences):\n for j, sym in enumerate(sentence):\n X[i,j] = sym_indices[sym] \n \n y[i, sym_indices[next_syms[i]]] = 1 # one-hot enconding\n\n return (X,y)",
"def build_input_data(sentences, labels, vocabulary):\n x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])\n y = np.array(labels)\n return [x, y]",
"def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]",
"def build_input_data(sentences, labels, vocabulary):\n vocabulary_inv = {word: index for index, word in vocabulary.items()}\n x = np.array([[vocabulary_inv[word] if word in vocabulary_inv else 0 for word in sent] for sent in sentences])\n y = np.array(labels)\n return [x, y]",
"def build_input_data(sentences, labels, vocabulary):\n # x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])\n\n # Uncomment this if we have unprecedented tokens\n for sentence_i in range(len(sentences)):\n for word_j in range(len(sentences[sentence_i])):\n if sentences[sentence_i][word_j] in vocabulary:\n sentences[sentence_i][word_j] = vocabulary[sentences[sentence_i][word_j]]\n else:\n sentences[sentence_i][word_j] = 1\n x = np.array(sentences)\n y = np.array(labels)\n return [x, y]",
"def Batch2Idx(batch, word2idx):\n# batch_idx = []\n# for sent in batch:\n# sent_list = []\n# for word in sent:\n# try:\n# idx = word2idx[word]\n# sent_list.append(idx)\n# except:\n# idx = word2idx['<unk>']\n# sent_list.append(idx)\n# batch_idx.append(sent_list)\n batch_idx = [[word2idx[word] for word in sent]for sent in batch]\n return batch_idx",
"def convert_to_idx(lines):\n for idx, l in enumerate(lines):\n line_temp = []\n for v in l:\n try:\n line_temp.append(vocab_idx[v])\n except KeyError:\n line_temp.append(vocab_idx['<unk>'])\n lines[idx] = line_temp\n return lines",
"def tokenize_sequences(source_sent, target_sent):\r\n\tsource_sent = source_sent.numpy().decode('utf-8')\r\n\ttarget_sent = target_sent.numpy().decode('utf-8')\r\n\r\n\tinpt = [en2idx.get(word, 1) for word in (u\"<SOS> \" + source_sent + u\" <EOS>\").split()]\r\n\toutpt = [de2idx.get(word, 1) for word in (u\"<SOS> \" + target_sent + u\" <EOS>\").split()]\r\n\r\n\treturn inpt, outpt",
"def encode(self, sentence):\n return sentence_to_token_ids(sentence, self.vocab)",
"def build_input_data(sentences, vocabulary):\n count = 0\n seq2seq_sentences = []\n for sentence in sentences:\n seq2seq_sentence = []\n for word in sentence:\n try:\n seq2seq_sentence.append(vocabulary[word])\n except KeyError:\n seq2seq_sentence.append(vocabulary['</s>'])\n count += 1\n seq2seq_sentences.append(seq2seq_sentence)\n print count\n return np.array(seq2seq_sentences)"
]
| [
"0.7674391",
"0.7353829",
"0.7153578",
"0.7031342",
"0.6893006",
"0.66123027",
"0.64662796",
"0.6405434",
"0.63036615",
"0.6300431",
"0.6209552",
"0.61663127",
"0.6027999",
"0.60131246",
"0.6002782",
"0.5977784",
"0.59518605",
"0.5949315",
"0.5922359",
"0.58846384",
"0.58815134",
"0.5865479",
"0.5862714",
"0.585012",
"0.5845733",
"0.58452356",
"0.5838964",
"0.5833479",
"0.5823168",
"0.5822856"
]
| 0.7652687 | 1 |
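A toy call of `sentences_to_indices` from the row above, assuming that function is in scope; the vocabulary is invented, and index 0 is left unused for padding, matching the `vocab_size = len(word_to_index) + 1` convention in the embedding-layer row that follows:

```python
import numpy as np

word_to_index = {"i": 1, "love": 2, "nlp": 3, "deep": 4, "learning": 5}  # invented
X = np.array(["I love NLP", "deep learning"])

X_indices = sentences_to_indices(X, word_to_index, max_len=4)
print(X_indices)
# [[1. 2. 3. 0.]
#  [4. 5. 0. 0.]]   <- unused positions stay 0; index 0 is reserved for padding
```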
Creates a Keras Embedding() layer and loads in pretrained GloVe 50-dimensional vectors. | def pretrained_embedding_layer(word_to_vec_map, word_to_index):
vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)
any_word = list(word_to_vec_map.keys())[0]
emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)
### START CODE HERE ###
# Step 1
# Initialize the embedding matrix as a numpy array of zeros.
# See instructions above to choose the correct shape.
emb_matrix = np.zeros((vocab_size, emb_dim))
# Step 2
# Set each row "idx" of the embedding matrix to be
# the word vector representation of the idx'th word of the vocabulary
for word, idx in word_to_index.items():
emb_matrix[idx, :] = word_to_vec_map[word]
# Step 3
# Define Keras embedding layer with the correct input and output sizes
# Make it non-trainable.
embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)
### END CODE HERE ###
# Step 4 (already done for you; please do not modify)
# Build the embedding layer, it is required before setting the weights of the embedding layer.
embedding_layer.build((None,)) # Do not modify the "None". This line of code is complete as-is.
# Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.
embedding_layer.set_weights([emb_matrix])
return embedding_layer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n return embedding_layer",
"def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. 
compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer",
"def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_",
"def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()",
"def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed",
"def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model",
"def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored",
"def keras_model_fn(_, config):\n\n f = open(config[\"embeddings_path\"],encoding='utf8')\n glove = f.readlines()[:config[\"embeddings_dictionary_size\"]]\n f.close()\n\n embedding_matrix = np.zeros((config[\"embeddings_dictionary_size\"], config[\"embeddings_vector_size\"]))\n for i in range(config[\"embeddings_dictionary_size\"]):\n if len(glove[i].split()[1:]) != config[\"embeddings_vector_size\"]:\n continue\n embedding_matrix[i] = np.asarray(glove[i].split()[1:], dtype='float32')\n\n cnn_model = tf.keras.Sequential()\n cnn_model.add(layers.Embedding(weights=[embedding_matrix],\n input_dim=config['embeddings_dictionary_size'],\n output_dim=config['embeddings_vector_size'],\n input_length=config['padding_size']))\n cnn_model.add(layers.Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1))\n cnn_model.add(layers.GlobalMaxPooling1D())\n cnn_model.add(layers.Dense(100, activation='relu'))\n cnn_model.add(layers.Dense(1, activation = 'sigmoid'))\n cnn_model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return cnn_model",
"def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings",
"def build_pre_embedding(self, use_saved_embed=False):\n\n if use_saved_embed and\\\n self.config.parser['embedding_save_dir'] is not '':\n Print(\n f'reading saved embedding file from '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'information'\n )\n with open(self.config.parser['embedding_save_dir'], 'rb') as f:\n pretrain_embed = pickle.load(f)\n else:\n if self.config.parser['embed_dir'] is None:\n Print('Pre-trained embedding file not available.', 'error')\n return\n\n embed_file = self.config.parser['embed_dir']\n\n # load in pre-trained Glove model, save it as a dict\n pretrain_embed = {}\n with open(embed_file, 'r', encoding='utf-8') as f:\n tqdm_iter = tqdm.tqdm(f.readlines())\n tqdm_iter.set_description('read from pre-trained file', False)\n for line in tqdm_iter:\n embed_content = line.strip().split()\n word, embed_content = embed_content[0], embed_content[1:]\n if self.config.parser['word_embed_dim'] < 0:\n self.config.parser['word_embed_dim'] = len(embed_content)\n elif self.config.parser['word_embed_dim'] != len(embed_content):\n # invalid embedding word\n continue\n embed_content = np.array([float(x) for x in embed_content])\n pretrain_embed[word] = embed_content\n \n if self.config.parser['embedding_save_dir'] is not '':\n with open(self.config.parser['embedding_save_dir'], 'wb') as f:\n pickle.dump(pretrain_embed, f)\n Print(\n f'pre-trained embedding dictionary is saved at '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'success'\n )\n\n embed_dim = self.config.parser['word_embed_dim']\n\n # build embedding if find it in pre-trained model\n # else randomly generate one.\n self.embedding = np.empty([\n self.word_dict.word_size, embed_dim\n ])\n scale = np.sqrt(3 / embed_dim)\n perfect_match, case_match, not_match = 0, 0, 0\n for word, index in self.word_dict.word2idx.items():\n if word in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word]\n perfect_match += 1\n if word.lower() in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word.lower()]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word.lower()]\n case_match += 1\n else:\n # not found\n self.embedding[index,\n :] = np.random.uniform(-scale, scale, [embed_dim])\n not_match += 1\n Print(\n f'Pre-trained embedding loaded in from {self.config.parser[\"embed_dir\"]},\\n'\\\n f'pre-train words: {len(pretrain_embed)}, perfect match {perfect_match},\\n'\\\n f'case match {case_match}, not match {not_match},\\n'\\\n f'oov {not_match / self.word_dict.word_size}', 'success'\n )\n return self.embedding",
"def init_pretrained_glove(glove_path, word2idx, embedding_dim):\n vocab_size = len(word2idx)\n # read in the glove files\n glove_file = os.path.join(glove_path, 'glove.6B.{:d}d.json'.\n format(embedding_dim))\n with open(glove_file, 'r') as fp:\n word2glove = json.load(fp)\n print('Read embeddings: {:s}'.format(glove_file))\n\n # then make giant matrix with all the matching vocab words\n padding_idx = 0\n # follow Karpahty's advice and initialize really small\n pretrained = torch.randn(vocab_size, embedding_dim) * 0.01\n count = 0\n for word, idx in word2idx.iteritems():\n # reserve the padding idx as 0\n if idx == padding_idx:\n torch.FloatTensor(embedding_dim).zero_()\n # keep as random initialization\n if word not in word2glove:\n continue\n pretrained[idx] = torch.FloatTensor(word2glove[word])\n\n embed = torch.nn.Embedding(vocab_size, embedding_dim)\n embed.weight = torch.nn.Parameter(pretrained)\n return embed",
"def set_glove_embedding(self,fpath,embedding_dim):\n\t\temb = np.random.randn(self._count,embedding_dim)\n#\ttf.logging.info(emb[0])\n\t\twith open(fpath) as f: #python 3.x support \n\t\t\tfor k,line in enumerate(f):\n\t\t\t\tfields = line.split()\n\t\t\t\tif len(fields) - 1 != embedding_dim:\n\t\t\t\t\t# Sometimes there are funny unicode parsing problems that lead to different\n\t\t\t\t\t# fields lengths (e.g., a word with a unicode space character that splits\n\t\t\t\t\t# into more than one colum n). We skip those lines. Note that if you have\n\t\t\t\t\t# some kind of long header, this could result in all of your lines getting\n\t\t\t\t\t# skipped. It's hard to check for that here; you just have to look in the\n\t\t\t\t\t# embedding_misses_file and at the model summary to make sure things look\n\t\t\t\t\t# like they are supposed to.\n\t\t\t\t\t#logger.warning(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t# embedding_dim, len(fields) - 1, line)\n\t\t\t\t\traise Exception(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t\t\t\t\t embedding_dim, len(fields) - 1, line)\n\t\t\t\t\tcontinue\n\t\t\t\tword = fields[0]\n\t\t\t\tif word in self._word_to_id:\n\t\t\t\t\tvector = np.asarray(fields[1:], dtype='float32')\n\t\t\t\t\temb[self._word_to_id[word]] = vector\n#\t\tif k%1000 == 0:\n#\t\t tf.logging.info('glove : %d',k)\n\t\tself.glove_emb = emb",
"def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)",
"def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)",
"def __init__(self, input_size, config):\r\n super(EmbeddingLayer, self).__init__()\r\n\r\n if config.emtraining:\r\n self.embedding = nn.Sequential(OrderedDict([\r\n ('embedding', nn.Embedding(input_size, config.emsize)),\r\n ('dropout', nn.Dropout(config.dropout))\r\n ]))\r\n else:\r\n self.embedding = nn.Embedding(input_size, config.emsize)\r\n self.embedding.weight.requires_grad = False",
"def load_pretrain_embedding(vocab, embed_size, embedding_path):\n model = KeyedVectors.load_word2vec_format(embedding_path)\n\n print('{} {}'.format(vocab.size(), embed_size))\n for token, id in vocab.token2id.items():\n if token in model:\n print('{} {}'.format(token, ' '.join(map(str, model[token]))))\n else:\n emb = np.random.random((embed_size,)) - 0.5\n print('{} {}'.format(token, ' '.join(map(str, emb))))",
"def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding",
"def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector",
"def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image",
"def load(path, device=None):\n\n V, W, vb, wb, dictionary = None, None, None, None, None\n\n dictionary_path = os.path.join(path, 'dictionary')\n if os.path.exists(dictionary_path):\n dictionary = h.dictionary.Dictionary.load(dictionary_path)\n V = np.load(os.path.join(path, 'V.npy'))\n if os.path.exists(os.path.join(path, 'W.npy')):\n W = np.load(os.path.join(path, 'W.npy'))\n if os.path.exists(os.path.join(path, 'v_bias.npy')):\n vb = np.load(os.path.join(path, 'v_bias.npy'))\n if os.path.exists(os.path.join(path, 'w_bias.npy')):\n wb = np.load(os.path.join(path, 'w_bias.npy'))\n\n return Embeddings(\n V, W=W, vb=vb, wb=wb, dictionary=dictionary,\n device=device\n )",
"def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = 
Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model",
"def vgg16_bn(pretrained,**kwargs):\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16_bn.paddle')\n model.set_state_dict(model_dict)\n return model",
"def vgg16(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16.paddle')\n model.set_state_dict(model_dict)\n return model",
"def pretrained(name=\"glove_100d\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(WordEmbeddingsModel, name, lang, remote_loc)",
"def test_build_with_embeddings(self):\n # Train a very small model\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n sentences = [[str(x) for x in numpy.arange(random.randint(3, 20))]\n for _ in range(25)]\n embedding_model = Word2Vec(\n sentences=sentences, size=self.model_arguments['hidden_layer_size'],\n iter=5)\n dataset = KDDCupDataset(embedding_model=embedding_model)\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n # Check build does not raise errors\n model = self.MODEL(\n dataset, embedding_model=embedding_model,\n **self.model_arguments)\n model.build_all()\n resulting_embeddings = model.sess.run(model.embedding_var)\n numpy.testing.assert_array_equal(resulting_embeddings[1:-1],\n embedding_model.wv.syn0)\n model.fit(training_epochs=50)",
"def create_vggvox(embedding_dims, name=\"vggvox\"):\n model = tf.keras.Sequential(name=name)\n model.add(tf.keras.layers.Conv2D(96, (7,7), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv1\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool1\"))\n model.add(tf.keras.layers.Conv2D(256, (5,5), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv2\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool2\"))\n model.add(tf.keras.layers.Conv2D(384, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv3\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv4\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv5\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((5,3), strides=(3,2), name=f\"{name}_mpool5\"))\n model.add(tf.keras.layers.Conv2D(4096, (9,1), strides=1, kernel_regularizer=tf.keras.regularizers.L2(5e-4), padding=\"valid\", name=f\"{name}_fc6\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=[1,2], name=f\"{name}_apool6\")))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(embedding_dims, kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_embeddings\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n return model",
"def load_pretrained_embeddings(vocabulary: dict, max_size: int):\n # get GloVe 6B pre-trained word embeddings, of dimension 100\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.Tensor.normal_)\n\n pretrained = []\n for k, _ in vocabulary.stoi.items():\n if k == \"<PAD>\":\n emb = torch.zeros([glove_vec.dim])\n elif k == \"<UNK>\":\n emb = torch.rand([glove_vec.dim])\n else:\n emb = glove_vec.get_vecs_by_tokens(k, lower_case_backup=True)\n pretrained.append(emb) \n\n # return a tensor of size [vocab_size, emb_dim]\n return torch.stack(pretrained, dim=0)",
"def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)",
"def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict",
"def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model"
]
| [
"0.76154107",
"0.7154327",
"0.6839435",
"0.6778559",
"0.6625631",
"0.6484582",
"0.6457826",
"0.64485055",
"0.6439924",
"0.6389491",
"0.63035595",
"0.62795806",
"0.62589514",
"0.624841",
"0.62045693",
"0.62018406",
"0.6198423",
"0.6192771",
"0.6180604",
"0.61795884",
"0.61667",
"0.6162144",
"0.6149238",
"0.6127886",
"0.61268365",
"0.6115558",
"0.61119956",
"0.6103608",
"0.60782295",
"0.607809"
]
| 0.76650494 | 0 |
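Several of the candidate snippets listed just above (the GloVe, gensim, and Keras loaders) repeat the same pretrained-embedding pattern: read word vectors into a dict, align them with a tokenizer's word_index to build an embedding matrix, and wrap that matrix in a frozen Embedding layer. The following is a minimal sketch of that shared pattern only; the names build_glove_embedding_layer, glove_path, word_index, embedding_dim, and maxlen are illustrative assumptions, not values taken from any record in this file.

import numpy as np
from keras.layers import Embedding  # tf.keras.layers.Embedding behaves the same way here

def build_glove_embedding_layer(glove_path, word_index, embedding_dim, maxlen):
    # Load the pretrained GloVe vectors into a {word: vector} dictionary.
    embeddings_index = {}
    with open(glove_path, encoding="utf-8") as f:
        for line in f:
            values = line.split()
            embeddings_index[values[0]] = np.asarray(values[1:], dtype="float32")

    # Build the embedding matrix; words missing from GloVe keep all-zero rows.
    embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
    for word, i in word_index.items():
        vector = embeddings_index.get(word)
        if vector is not None:
            embedding_matrix[i] = vector

    # Wrap the matrix in a non-trainable Embedding layer.
    return Embedding(input_dim=len(word_index) + 1,
                     output_dim=embedding_dim,
                     weights=[embedding_matrix],
                     input_length=maxlen,
                     trainable=False)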
Function creating the Emojify-V2 model's graph. | def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
### START CODE HERE ###
# Define sentence_indices as the input of the graph.
# It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).
sentence_indices = Input(shape = input_shape, dtype = 'int32')
# Create the embedding layer pretrained with GloVe Vectors (≈1 line)
# def pretrained_embedding_layer(word_to_vec_map, word_to_index): # return embedding_layer
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer
# (See additional hints in the instructions).
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# The returned output should be a batch of sequences.
X = LSTM(units = 128, return_sequences = True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(rate = 0.5)(X)
    # Propagate X through another LSTM layer with 128-dimensional hidden state
# The returned output should be a single hidden state, not a batch of sequences.
X = LSTM(units = 128, return_sequences = False)(X)
# Add dropout with a probability of 0.5
X = Dropout(rate = 0.5)(X)
# Propagate X through a Dense layer with 5 units
X = Dense(units = 5)(X)
# Add a softmax activation
X = Activation(activation = 'softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(inputs = sentence_indices, outputs = X)
### END CODE HERE ###
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)",
"def gen_graph(self):",
"def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()",
"def create_styled_dot_file(user_list, edge_list):\n \n NODE = '$id [label=< <table border=\"0\" cellborder=\"0\" cellspacing=\"0\"'\\\n ' bgcolor=\"#CCCCCC\"> <tr> <td colspan=\"2\" cellpadding=\"2\"'\\\n ' align=\"center\" bgcolor=\"#33CCFF\"> <font face=\"Helvetica Bold\">'\\\n '$id</font> </td> </tr> $rows </table> >]'\n ATTRIBUTE = '<tr> <td align=\"left\" cellpadding=\"2\"><font face=\"Helvetica'\\\n ' Bold\">$key</font></td> <td align=\"left\" cellpadding=\"2\">$value'\\\n '</td> </tr>'\n node = Template(NODE)\n attribute = Template(ATTRIBUTE)\n nodes = '\\n'.join([node.substitute(id=u,\n rows='\\n'.join([attribute.substitute(key=k, value=v) for k, v in\n d.iteritems()])) for u, d in user_list.iteritems()])\n edges = ' '.join(['%s -> %s;'%(src, tgt) for src, tgt in edge_list])\n graph = 'digraph G { node [ fontname = \"Helvetica\" fontsize = 8 shape ='\\\n ' \"plaintext\" ] %s %s }'%(nodes, edges)\n return graph",
"def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")",
"def generate(self, diagram):",
"def create_graph():\n alpha = request.args.get('alpha')\n alpha = float(alpha)\n beta = request.args.get('beta')\n beta = float(beta)\n pec = request.args.get('pec')\n pec = float(pec)\n q = request.args.get('q')\n q = float(q)\n name = request.args.get('instance')\n name = str(name)\n\n nodes = create_nodes(name)\n i = Instance(nodes, alpha, beta, pec, q)\n\n return jsonify(nodes=i.nodes, alpha=i.alpha, beta=i.beta, decay=i.decay,\n min_pheromone=i.min_pheromone, q=i.q,\n local_deposit=i.local_deposit, distances=i.distances,\n pheromones=i.pheromones, ants=i.ants, shortest_path=i.shortest_path,\n min_distance=i.min_distance, message=\"Instance Initialised\")",
"def graph():\n return jsonify(app.config[\"jsonified\"])",
"def _construct_graph(self):\n raise NotImplementedError",
"def create_graph(self, ctx: Context, graph_input: str) -> DefaultEmbed:\n with TemporaryFile(suffix=\".png\") as buffer:\n with Graph(ctx, buffer, *self.calculate(graph_input)) as embed:\n return embed",
"def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def _build_graph(self):\n pass",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def _make_metagraph(self):\n\n print('Initializing metagraph...')\n time.sleep(0.5)\n abbrev_dict, edge_tuples = gt.get_abbrev_dict_and_edge_tuples(self.node_df, self.edge_df)\n self.metagraph = MetaGraph.from_edge_tuples(edge_tuples, abbrev_dict)",
"def visualize_epidemic(vnetwork, net_attrs):\n # Colour nodes\n colours = {}\n for node, attrs in net_attrs.items():\n if attrs[1]['i']:\n n_colour = 'r'\n elif attrs[1]['s']:\n n_colour = 'g'\n else:\n n_colour = 'b'\n colours[attrs[0]] = n_colour\n draw(vnetwork, nodeColorDict=colours, show=False, layout=\"circular\")\n plt.pause(2)\n return None",
"def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))",
"def plotModel(self, name):\n g = Digraph('G', filename = name + '.gv')\n\n for prevChord in self.chain:\n for chord in self.chain[prevChord]:\n g.edge(prevChord, chord, label=\"%.2f\" % self.chain[prevChord][chord])\n\n g.view()",
"def export_model_description(md: ModelDescription) -> bytes:\n\n # ---------------- write model description -------------------\n\n fmd = ET.Element(\"fmiModelDescription\")\n fmd.set(\"fmiVersion\", \"2.0\")\n fmd.set(\"modelName\", md.modelName)\n fmd.set(\"guid\", md.guid)\n fmd.set(\"author\", md.author)\n fmd.set(\"generationDateAndTime\", md.generationDateAndTime)\n fmd.set(\"variableNamingConvention\", md.variableNamingConvention)\n fmd.set(\"generationTool\", md.generationTool)\n fmd.set(\"description\", md.description)\n\n # CoSimulation\n cs = ET.SubElement(fmd, \"CoSimulation\")\n cs.set(\"modelIdentifier\", md.CoSimulation.modelIdentifier)\n cs.set(\n \"needsExecutionTool\", str(md.CoSimulation.needsExecutionTool).lower(),\n )\n cs.set(\n \"canHandleVariableCommunicationStepSize\",\n str(md.CoSimulation.canHandleVariableCommunicationStepSize).lower(),\n )\n cs.set(\n \"canInterpolateInputs\", str(md.CoSimulation.canInterpolateInputs).lower(),\n )\n\n cs.set(\n \"maxOutputDerivativeOrder\", str(md.CoSimulation.maxOutputDerivativeOrder),\n )\n cs.set(\n \"canRunAsynchronuously\", str(md.CoSimulation.canRunAsynchronuously).lower(),\n )\n cs.set(\n \"canBeInstantiatedOnlyOncePerProcess\",\n str(md.CoSimulation.canBeInstantiatedOnlyOncePerProcess).lower(),\n )\n cs.set(\n \"canNotUseMemoryManagementFunctions\",\n str(md.CoSimulation.canNotUseMemoryManagementFunctions).lower(),\n )\n cs.set(\n \"canGetAndSetFMUstate\", str(md.CoSimulation.canGetAndSetFMUstate).lower(),\n )\n cs.set(\n \"canSerializeFMUstate\", str(md.CoSimulation.canSerializeFMUstate).lower(),\n )\n cs.set(\n \"providesDirectionalDerivative\",\n str(md.CoSimulation.providesDirectionalDerivative).lower(),\n )\n\n # 2.2.4 p.42) Log categories:\n cs = ET.SubElement(fmd, \"LogCategories\")\n for ac in md.logCategories:\n c = ET.SubElement(cs, \"Category\")\n c.set(\"name\", ac)\n\n # 2.2.7 p.47) ModelVariables\n mvs = ET.SubElement(fmd, \"ModelVariables\")\n\n variable_index = 0\n\n for var in md.modelVariables:\n var.variability\n value_reference = str(var.value_reference)\n\n idx_comment = ET.Comment(f'Index of variable = \"{variable_index + 1}\"')\n mvs.append(idx_comment)\n sv = ET.SubElement(mvs, \"ScalarVariable\")\n sv.set(\"name\", var.name)\n sv.set(\"valueReference\", value_reference)\n sv.set(\"variability\", var.variability)\n sv.set(\"causality\", var.causality)\n\n if var.description:\n sv.set(\"description\", var.description)\n\n if var.initial:\n i = var.initial\n sv.set(\"initial\", i)\n\n val = ET.SubElement(sv, var.dataType)\n\n # 2.2.7. p.48) start values\n if var.initial in {\"exact\", \"approx\"} or var.causality == \"input\":\n assert (\n var.start != None\n ), \"a start value must be defined for intial ∈ {exact, approx}\"\n val.set(\"start\", var.start)\n\n variable_index += 1\n\n ms = ET.SubElement(fmd, \"ModelStructure\")\n\n # 2.2.8) For each output we must declare 'Outputs' and 'InitialUnknowns'\n outputs = [\n (idx + 1, o)\n for idx, o in enumerate(md.modelVariables)\n if o.causality == \"output\"\n ]\n\n if outputs:\n os = ET.SubElement(ms, \"Outputs\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n os = ET.SubElement(ms, \"InitialUnknowns\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n # FMI requires encoding to be encoded as UTF-8 and contain a header:\n #\n # See 2.2 p.28\n return ET.tostring(fmd, pretty_print=True, encoding=\"utf-8\", xml_declaration=True)",
"def add_graph(self, model, image_size):\n dummy_input = torch.rand(2, 1, image_size, image_size)\n self.writer.add_graph(model, dummy_input, True)",
"def genCode(self, fileName, allowedTypes, genGraph = 1, isRootNode = 0, \r\n metaModelName = None, export = 0, newTypes = None, \r\n nodesToGenList = [], openModelStringList=[], attrGenFix=False):\r\n file = open(fileName, \"w+t\" )\r\n\r\n dir, fil = os.path.split(fileName)\r\n funcName = string.split (fil, \".\")\t\t\t\t\t# compose class name\r\n\r\n if export == 0:\r\n file.write('\"\"\"\\n')\r\n file.write(\"__\"+ fil +\"_____________________________________________________\\n\")\r\n file.write(\"\\n\") \r\n file.write(\"Automatically generated AToM3 Model File (Do not modify directly)\\n\")\r\n file.write(\"Author: \"+USER_NAME+\"\\n\")\r\n file.write(\"Modified: \"+time.asctime()+\"\\n\") \r\n file.write(\"__\"+ len(fil)*\"_\" +\"_____________________________________________________\\n\")\r\n file.write('\"\"\"\\n')\r\n #file.write('from graph_ASG_ERmetaMetaModel import *\\n')\t\t# just for the case!\r\n file.write('from stickylink import *\\n')\t\t\t\t# necessary if we describe some graphLinks...\r\n file.write('from widthXfillXdecoration import *\\n')\t\t\t# necessary if we describe some graphLinks...\r\n\r\n # import the subclass ...\r\n if( self.getClass() not in self.nodeTypes ):\r\n file.write('from '+self.getClass()+' import *\\n')\r\n \r\n # import all the node types...\r\n for nodetype in self.nodeTypes:\r\n if( self.listNodes[nodetype] != [] ): \r\n file.write('from '+nodetype+' import *\\n') \r\n \r\n # Import all the graphical appearences of the node types... that\r\n # are actually used! \r\n # Added by Denis Dube, last modified on Sept. 9, 2004\r\n if( genGraph ): \r\n # STEP 1: Find all graphObjects used in the model\r\n graph_objectDict = dict()\r\n for nodetype in self.listNodes.keys():\r\n for node in self.listNodes[nodetype]:\r\n if( node.graphClass_ ):\r\n graph_objectDict[ node.graphObject_.getGraphClassName() ]=1\r\n # STEP 2: Create the import statements for each graphObject\r\n for graphObject in graph_objectDict.keys():\r\n file.write('from '+graphObject+' import *\\n')\r\n # NOTE: I think the next two statements are caution overkill...\r\n #file.write('try: from '+graphObject+' import *\\n')\r\n #file.write('except: print \"WARNING: unable to load the graphical appearence file: '+graphObject+'.py\" \\n')\r\n \r\n # import the basic types...\r\n for typ in allowedTypes.keys():\r\n typeInstance, params = allowedTypes[typ]\r\n typeName = typeInstance.__name__\r\n file.write('from '+typeName+' import *\\n')\r\n \r\n # Generate the ASG constructor\r\n if( attrGenFix ):\r\n self.__genASGconstructor( file, funcName ) \r\n else:\r\n # Old way\r\n file.write('\\ndef '+funcName[0]+'(self, rootNode):\\n')\r\n \r\n # Generate code for the ASGroot attributes\r\n if( isRootNode ): \r\n # Should attrGenFix be always true? 
More testing required\r\n #todo: attrGenFix == True always?\r\n if( attrGenFix ): self.__genAttributesROOT( file )\r\n else: self.genAttributesCode(file, genGraph, \"rootNode\")\r\n\r\n self.writeGraph2File(file, genGraph, isRootNode, None, \" \", 1, funcName[0], nodesToGenList=nodesToGenList)\r\n\r\n # generate code for the sub-models\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]: \r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n else: \r\n for node in nodesToGenList:\r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n \r\n\r\n if isRootNode:\r\n hierarchical = self.isHierarchical()\r\n if export == 0:\r\n if hierarchical:\r\n file.write('def main'+funcName[0]+'(self, ASGroot):\\n')\r\n # file.write(' self.ASGroot = '+self.getClass()+'(self)\\n')\r\n file.write(' self.'+funcName[0]+'(self, ASGroot)\\n\\n')\r\n file.write(' self.'+funcName[0]+'_connections(self, ASGroot)\\n\\n')\r\n file.write('newfunction = main'+funcName[0]+'\\n\\n')\r\n else:\r\n file.write('newfunction = '+funcName[0]+'\\n\\n')\r\n if newTypes and len(newTypes)>0: # generate a list of newly added types\r\n file.write('loadedTypes = [')\r\n counter = 0\r\n for nt in newTypes:\r\n if counter > 0: file.write(',')\r\n file.write(str(nt))\r\n counter = counter + 1\r\n file.write(']\\n')\r\n \r\n self.genLoadedMMName( file )\r\n if( attrGenFix ): file.write( '\\natom3version = \\'0.3\\'\\n' )\r\n file.close()\r\n return funcName[0] \t\t\t\t# this indicates that we've done something\r",
"def create_graph(self, modelDir):\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n modelDir, 'classify_image_graph_def.pb'), 'rb') as f:\n \tgraph_def = tf.GraphDef()\n \tgraph_def.ParseFromString(f.read())\n \t_ = tf.import_graph_def(graph_def, name='')",
"def fig4():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])",
"def draw(self, model):\n graph = model.graph\n ants = model.ants\n sugar = model.sugar\n nest = model.nest\n\n colors = {node: \"y\" for node in graph.nodes}\n colors[nest] = \"b\"\n colors[sugar] = \"r\"\n for ant in ants:\n colors[ant.position] = \"k\"\n\n weights = [graph[u][v][\"weight\"] / 5 for u, v in graph.edges()]\n super().draw(graph, node_color=colors.values(), width=weights)#, arrows=True)",
"def create_graph(names_input_e, names_input_t, names_output_e, names_byproduct, name_referenceproduct,\r\n activity_geoshort_name):\r\n\r\n # create element for dot language\r\n graph = gr.Digraph(comment='', format='pdf')\r\n graph.graph_attr.update(rankdir='LR') # general order of nodes from left to right\r\n\r\n # create subgraphs left, center, right + corresponding edges\r\n # create subgraph in the center\r\n with graph.subgraph(name='center') as c:\r\n c.graph_attr.update(rank='same') # set all nodes to same rank\r\n # create activity node\r\n c.node('Activity', activity_geoshort_name, shape='box', color='white', fillcolor='lightsteelblue', style='filled')\r\n\r\n # create inputs from environment nodes + edges to activity node\r\n for i, name in enumerate(names_input_e):\r\n c.node('InputE_' + str(i), name, shape='box', color='white', fillcolor='snow', style='filled') # node\r\n graph.edge('Activity', 'InputE_' + str(i),\r\n dir='back') # edge to activity (edge is defined inverse, such that the input nodes appear _below_ the product node)\r\n\r\n # create outputs to environment nodes + edges from activity node\r\n for i, name in enumerate(names_output_e):\r\n c.node('OutputE_' + str(i), name, shape='box', color='white', fillcolor='snow', style='filled')\r\n graph.edge('OutputE_' + str(i), 'Activity',\r\n dir='back') # edge to activity (edge is defined inverse, such that the output nodes appear _above_ the product node)\r\n\r\n # create subgraph on the left (inputs from technosphere)\r\n with graph.subgraph(name='input_technosphere') as it:\r\n it.graph_attr.update(rank='min') # set all nodes to minimum rank -> left\r\n for i, name in enumerate(names_input_t):\r\n it.node('InputT_' + str(i), name, shape='box', color='white', fillcolor='ghostwhite', style='filled') # node\r\n graph.edge('InputT_' + str(i), 'Activity') # edge to activity\r\n\r\n # create subgraph on the right (ref. product and byproducts)\r\n with graph.subgraph(name='product_subgraph') as p:\r\n p.graph_attr.update(rank='max') # set all nodes to max rank -> right\r\n p.node('Product', name_referenceproduct, shape='box', color='white', fillcolor='ghostwhite', style='filled') # ref. product node\r\n graph.edge('Activity', 'Product') # edge activits to product\r\n\r\n # output of by products\r\n for i, name in enumerate(names_byproduct):\r\n p.node('OutputT_' + str(i), name, shape='box', color='white', fillcolor='ghostwhite', style='filled') # node\r\n graph.edge('Activity', 'OutputT_' + str(i)) # edge\r\n\r\n return graph",
"def model_architecture_to_file(model, save_path, show_shapes=True):\n plot_model(model, to_file=save_path + \"_model_architecture.png\", show_shapes=show_shapes)",
"def make_2axis_graph():\n d = curdoc()\n _remove_fig(d)\n graph_val = d.get_model_by_name(GRAPH_SELECTION).value\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n\n xval = d.get_model_by_name(X_AXIS_SELECTION).value\n yval = d.get_model_by_name(Y_AXIS_SELECTION).value\n\n if xval != DEFAULT_UNSELECTED and yval != DEFAULT_UNSELECTED:\n plot = figure(plot_width=400, plot_height=400, name=FIGURE_MODEL)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=100000)\n\n # get the field name back from the pretty field : meta string formed above\n x = xval.split(\" :\")[0]\n y = yval.split(\" :\")[0]\n\n if graph_val == \"line\":\n plot.line(x=x, y=y, color=\"firebrick\", line_width=2, source=d.get_model_by_name(sind))\n plot.x_range.follow = \"end\" # don't jam all the data into the graph; \"window\" it\n plot.x_range.follow_interval = 100\n plot.x_range.range_padding = 0\n if graph_val == \"scatter\":\n plot.cross(x=x, y=y, size=20, color=\"firebrick\", line_width=2, source=d.get_model_by_name(sind))\n if graph_val == \"step\":\n plot.step(x=x, y=y, color=\"#FB8072\", source=d.get_model_by_name(sind))\n\n d.add_root(plot)",
"def convertOFFtoELENODE( offname ):\n with open(offname, \"r\") as OFF:\n OFFLines = OFF.readlines()\n\n OFFData = []\n for line in OFFLines:\n OFFData.append(line.split())\n \n numVertices = int(OFFData[1][0])\n numFaces = int(OFFData[1][1])\n numPerFace = int(OFFData[2+numVertices+1][0])\n\n outname = offname.split(\".\")[0] #To name the output files\n\n with open( outname + \".ele\", \"w\") as ELE:\n ELE.write( \"{}\\t{}\\t0\\n\".format(numFaces, numPerFace)) #Placing the number of elements, and taking the number of vertices in an element from the first element that appears in the off\n \n for i in range(2 + numVertices, 2 + numVertices + numFaces):\n temp = []\n for j in range( 1, 1+numPerFace):\n temp.append( int(OFFData[i][j]) + 1 )\n\n template = \"{}\\t\" + \"{}\\t\"*numPerFace + \"\\n\"\n ELE.write( template.format( i-numVertices-1, *temp))\n\n with open( outname + \".node\", \"w\") as NODE:\n NODE.write( \"{}\\t2\\t0\\t0\\n\".format(numVertices)) #Placing the number of elements, and taking the number of vertices in an element from the first element that appears in the off\n \n for i in range(2, 2 + numVertices):\n\n template = \"{}\\t{}\\t{}\\n\"\n NODE.write( template.format( i-1, *OFFData[i]))\n \n return",
"def build_graph(self):\n pass",
"def createEdge(lines, list):\n res = lines.split('\\\\n')\n mains = res[0].split(' ')\n sid = mains[3]\n sid = sid[4:-1]\n ssource = mains[4]\n ssource = ssource[8:-1]\n starget = mains[5]\n starget = starget[8:-2]\n slabel = ''\n i = 2\n\n while ('key=' in res[i]):\n i = i + 1\n\n if ('EdgeLabel' in res[i + 4]):\n slabels = res[i + 4].split('>')\n slabel = slabels[1]\n slabel = slabel.split('<')[0]\n slabel = umlautHelper(slabel)\n\n source = findInList(ssource, list)\n target = findInList(starget, list)\n\n nline = Edge(sid, source, target)\n nline.setLabel(slabel)\n\n j = i + 1\n while ('Path' in res[j] or 'Point' in res[j]):\n j = j + 1\n\n allarrows = res[j + 1]\n if ('source=\"standard' in allarrows or 'source=\"delta' in allarrows):\n nline.setArrowSource(True)\n if ('target=\"standard' in allarrows or 'target=\"delta' in allarrows):\n nline.setArrowTarget(True)\n\n if (type(source) == Entity and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Entity and type(source) == Attribute):\n target.addAttribute(source)\n if (type(source) == Relation and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Relation and type(source) == Attribute):\n target.addAttribute(source)\n list.append(nline)"
]
| [
"0.5534107",
"0.5258285",
"0.515239",
"0.5108124",
"0.5100486",
"0.509315",
"0.49832487",
"0.49781877",
"0.4977967",
"0.49577668",
"0.49575582",
"0.49375737",
"0.49323317",
"0.49096295",
"0.48911446",
"0.4890061",
"0.48886243",
"0.48797578",
"0.48782152",
"0.4875987",
"0.4868809",
"0.48595175",
"0.48562187",
"0.48501515",
"0.48335192",
"0.483084",
"0.482899",
"0.48273093",
"0.48215553",
"0.48190585"
]
| 0.58188945 | 0 |
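As a usage note for the Emojify_V2 graph in the record above: the function assumes the Keras functional-API names (Input, LSTM, Dropout, Dense, Activation, Model) and a pretrained_embedding_layer helper are already in scope. The sketch below shows that assumed context and a typical compile/fit call; max_len, word_to_vec_map, word_to_index, X_train_indices, and Y_train_oh are hypothetical inputs, not data from this record.

# Context assumed by Emojify_V2 above (not shown in the record itself).
from keras.layers import Input, LSTM, Dropout, Dense, Activation
from keras.models import Model
# pretrained_embedding_layer(word_to_vec_map, word_to_index) -> Embedding is an
# external helper assumed to exist; see the GloVe sketch earlier in this file.

# Hypothetical driver; the sequence length, vector maps, and training arrays are assumptions.
max_len = 10
model = Emojify_V2((max_len,), word_to_vec_map, word_to_index)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train_indices, Y_train_oh, epochs=50, batch_size=32, shuffle=True)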
Attempt to import a module, with a fallback. Attempt to import ``name``. If it fails, return ``alternative``. When supporting multiple versions of Python or optional dependencies, it is useful to be able to try to import a module. | def try_import(name, alternative=None, error_callback=None):
module_segments = name.split('.')
last_error = None
remainder = []
# module_name will be what successfully imports. We cannot walk from the
# __import__ result because in import loops (A imports A.B, which imports
# C, which calls try_import("A.B")) A.B will not yet be set.
while module_segments:
module_name = '.'.join(module_segments)
try:
__import__(module_name)
except ImportError:
last_error = sys.exc_info()[1]
remainder.append(module_segments.pop())
continue
else:
break
else:
if last_error is not None and error_callback is not None:
error_callback(last_error)
return alternative
module = sys.modules[module_name]
nonexistent = object()
for segment in reversed(remainder):
module = getattr(module, segment, nonexistent)
if module is nonexistent:
if last_error is not None and error_callback is not None:
error_callback(last_error)
return alternative
return module | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_import(module, default=None):\n try:\n return importlib.import_module(module)\n except ImportError:\n return default",
"def try_import(import_str, default=None):\r\n try:\r\n return import_module(import_str)\r\n except ImportError:\r\n return default",
"def import_module(name) -> Optional[ModuleType]:\n\n try:\n return importlib.import_module(name)\n except ModuleNotFoundError:\n return None",
"def resolve_name(name, *additional_parts):\n additional_parts = \".\".join(additional_parts)\n\n if additional_parts:\n name = name + \".\" + additional_parts\n\n parts = name.split(\".\")\n\n if len(parts) == 1:\n # No dots in the name--just a straight up module import\n cursor = 1\n fromlist = []\n else:\n cursor = len(parts) - 1\n fromlist = [parts[-1]]\n\n module_name = parts[:cursor]\n\n while cursor > 0:\n try:\n ret = __import__(\".\".join(module_name), fromlist=fromlist)\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n fromlist = [parts[cursor]]\n ret = \"\"\n\n for part in parts[cursor:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError(name)\n\n return ret",
"def resolve_name(name):\n parts = name.split('.')\n cursor = len(parts)\n module_name, rest = parts[:cursor], parts[cursor:]\n\n while cursor > 0:\n try:\n ret = __import__('.'.join(module_name))\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n rest = parts[cursor:]\n ret = ''\n\n for part in parts[1:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError\n\n return ret",
"def import_module(name):\n testname = None\n try:\n package = __import__(name)\n except ImportError, e:\n parts = name.rsplit('/', 1)\n if len(parts) != 2:\n return None, None, None\n try:\n package, temp, testname = import_module(parts[0])\n except ImportError:\n raise ImportError('Unable to import %s' % name)\n if testname:\n testname = '.'.join((testname, parts[1]))\n else:\n testname = parts[1]\n return package, package.__name__, testname",
"def resolve_import(self, item):\n name = item.name\n # The last part in `from a.b.c import d` might be a symbol rather than a\n # module, so we try a.b.c and a.b.c.d as names.\n short_name = None\n if item.is_from and not item.is_star:\n if '.' in name.lstrip('.'):\n # The name is something like `a.b.c`, so strip off `.c`.\n rindex = name.rfind('.')\n else:\n # The name is something like `..c`, so strip off just `c`.\n rindex = name.rfind('.') + 1\n short_name = name[:rindex]\n\n if import_finder.is_builtin(name):\n filename = name + '.so'\n return Builtin(filename, name)\n\n filename, level = convert_to_path(name)\n if level:\n # This is a relative import; we need to resolve the filename\n # relative to the importing file path.\n filename = os.path.normpath(\n os.path.join(self.current_directory, filename))\n\n if not short_name:\n try_filename = True\n try_short_filename = False\n elif item.source:\n # If the import has a source path, we can use it to eliminate\n # filenames that don't match.\n source_filename, _ = os.path.splitext(item.source)\n dirname, basename = os.path.split(source_filename)\n if basename == \"__init__\":\n source_filename = dirname\n try_filename = source_filename.endswith(filename)\n try_short_filename = not try_filename\n else:\n try_filename = try_short_filename = True\n\n files = []\n if try_filename:\n files.append((name, filename))\n if try_short_filename:\n short_filename = os.path.dirname(filename)\n files.append((short_name, short_filename))\n\n for module_name, path in files:\n for fs in self.fs_path:\n f = self._find_file(fs, path)\n if not f or f == self.current_module.path:\n # We cannot import a file from itself.\n continue\n if item.is_relative():\n package_name = self.current_module.package_name\n if package_name is None:\n # Relative import in non-package\n raise ImportException(name)\n module_name = get_absolute_name(package_name, module_name)\n if isinstance(self.current_module, System):\n return System(f, module_name)\n return Local(f, module_name, fs)\n\n # If the module isn't found in the explicit pythonpath, see if python\n # itself resolved it.\n if item.source:\n prefix, ext = os.path.splitext(item.source)\n mod_name = name\n # We need to check for importing a symbol here too.\n if short_name:\n mod = prefix.replace(os.path.sep, '.')\n mod = utils.strip_suffix(mod, '.__init__')\n if not mod.endswith(name) and mod.endswith(short_name):\n mod_name = short_name\n\n if ext == '.pyc':\n pyfile = prefix + '.py'\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n elif not ext:\n pyfile = os.path.join(prefix, \"__init__.py\")\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n return System(item.source, mod_name)\n\n raise ImportException(name)",
"def import_item(name):\n if sys.version_info < (3,):\n if not isinstance(name, bytes):\n name = name.encode()\n parts = name.rsplit('.', 1)\n if len(parts) == 2:\n # called with 'foo.bar....'\n package, obj = parts\n module = __import__(package, fromlist=[obj])\n try:\n pak = getattr(module, obj)\n except AttributeError:\n raise ImportError('No module named %s' % obj)\n return pak\n else:\n # called with un-dotted string\n return __import__(parts[0])",
"def _import(self, module, name):\n try:\n return getattr(__import__(module, fromlist=[name]), name)\n except (AttributeError, ImportError):\n msg = \"Failed to load %s from %s: %s\" % (name, module,\n sys.exc_info()[1])\n if not self.fail_silently:\n print(msg)\n else:\n _debug(msg)\n return None",
"def get_for_name(self, name: str, settings) -> ImportStrategy:\n if _is_dotted_path(name):\n module, type_name = name.rsplit(\".\", maxsplit=1)\n if type_name in self.local_types:\n local_module = self.local_types[type_name]\n if module == local_module:\n # `module == local_module` means an exact match in imports\n # i.e. from <package match> import <name match>\n return ImportStrategy.USE_EXISTING\n elif local_module is None:\n # `local_module is None` means local ClassDef\n # if there is a local ClassDef and type has dotted path then\n # maybe it was intended to disambiguate from the local cls?\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_DOTTED\n else:\n # TODO in theory we could probably calculate the absolute\n # import from filename + relative path, but it's awkward\n raise NameMatchesLocalClassError(module, type_name)\n elif local_module.startswith(\".\"):\n # Relative import: \"can't tell\"\n # we have a full path so we could add an import\n # but it may be duplicating something already imported\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_DOTTED\n else:\n # TODO in theory we could probably calculate the absolute\n # import from filename + relative path, but it's awkward\n raise NameMatchesRelativeImportError(module, type_name)\n else:\n # \"looks like different path\"\n return ImportStrategy.ADD_DOTTED\n else:\n # handle * imports? we could assume `name` is imported\n # if `from module import *` is present... BUT:\n # if `type_name.startswith(\"_\")` it would be exempt\n # and `__all__` could break both of these assumptions\n # So... we treat any matching * import as AMBIGUOUS\n if module in self.local_types.star_imports:\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_FROM\n else:\n raise ModuleHasStarImportError(module, type_name)\n elif module in self.local_types.type_defs:\n if settings.IMPORT_COLLISION_POLICY is ImportCollisionPolicy.IMPORT:\n # the name was maybe already in scope but it's safe\n # to add a specific import as well\n return ImportStrategy.ADD_FROM\n else:\n raise NameMatchesLocalClassError(module, name)\n elif module in self.local_types.package_imports:\n return ImportStrategy.USE_EXISTING_DOTTED\n elif module in self.local_types.names_to_packages:\n return ImportStrategy.USE_EXISTING_DOTTED\n else:\n return ImportStrategy.ADD_FROM\n else:\n if name == Types.ELLIPSIS:\n return ImportStrategy.USE_EXISTING\n elif name in self.local_types:\n return ImportStrategy.USE_EXISTING\n elif _is_builtin_type(name):\n return ImportStrategy.USE_EXISTING\n elif _is_typing_type(name):\n return ImportStrategy.ADD_FROM\n else:\n # there's no possibility to add an import, so no AUTO option\n raise NotFoundNoPathError(None, name)",
"def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]",
"def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]",
"def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]",
"def _import_module(name):\n __import__(name)\n return sys.modules[name]",
"def _import_module(name):\n __import__(name)\n return sys.modules[name]",
"def _import_module(self, name):\r\n try:\r\n __import__(name)\r\n return True\r\n except ImportError:\r\n return False",
"def _import(module_name, dir_name):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module_info = imp.find_module(module_name, [dir_name])\n module = imp.load_module(safe_name, *module_info)\n return module",
"def import_module_by_name(mod_name):\n return importlib.__import__(mod_name)",
"def __import_from(localization, member_name, module_name=\"__builtin__\"):\n module = import_python_module(localization, module_name)\n if isinstance(module, TypeError):\n return module, None\n\n try:\n return module, module.get_type_of_member(localization, member_name)\n except Exception as exc:\n return module, TypeError(localization,\n \"Could not load member '{0}' from module '{1}': {2}\".format(member_name, module_name,\n str(exc)))",
"def rtimport(name):\n\n\t# This can raise ImportError\n\tobj = __import__(name)\n\n\tcomponents = name.split('.')\n\tfor comp in components[1:]:\n\t\ttry:\n\t\t\tobj = getattr(obj, comp)\n\t\texcept AttributeError:\n\t\t\traise ImportError\n\n\treturn obj",
"def import_module(name):\n __import__(name)\n return sys.modules[name]",
"def load_module(self, name, quiet=True):\n full_name = '%s.%s' % (self.name, name)\n try:\n return import_module(full_name)\n except ImportError:\n if quiet:\n return None\n raise",
"def resolve_name(name):\n parts = name.split('.')\n used = parts.pop(0)\n found = __import__(used)\n for part in parts:\n used += '.' + part\n try:\n found = getattr(found, part)\n except AttributeError:\n __import__(used)\n found = getattr(found, part)\n return found",
"def _import_string(module_name, content):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module = imp.new_module(safe_name)\n exec content in module.__dict__\n return module",
"def import_and_get(name: str) -> Any:\n if '.' not in name:\n raise ImportError(f'name is not like <module>.<name>: {name}')\n module_name, attr_name = name.rsplit('.', 1)\n module = importlib.import_module(module_name)\n if not hasattr(module, attr_name):\n raise ImportError(f'name not found in module: {name}')\n return getattr(module, attr_name)",
"def import_or_none(library):\n try:\n return importlib.import_module(library)\n except ImportError:\n return None",
"def my_import(name):\n components = name.split('.')\n mod = __import__(components[0], globals(), locals(), components[1:], -1)\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod",
"def _get_module(self, name):\n module = self._modules.get(name)\n if not module:\n module = importlib.import_module(name)\n self._modules[name] = module\n return module",
"def _tryImport(self, name):\n for path in self.cfgPath:\n filename = os.path.join(path, name + \".cfg\")\n if os.path.exists(filename):\n try:\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n # Need to specify SourceFileLoader since the files do not\n # have a .py extension.\n module_name = f\"{name}_cfg\"\n loader = importlib.machinery.SourceFileLoader(module_name, filename)\n spec = importlib.util.spec_from_file_location(\n module_name, filename, submodule_search_locations=None, loader=loader\n )\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n except Exception as e:\n state.log.warn(f\"Error loading configuration {filename} ({e})\")\n continue\n state.log.info(f\"Using configuration for package '{name}' at '{filename}'.\")\n if not hasattr(module, \"dependencies\") or not isinstance(module.dependencies, dict):\n state.log.warn(f\"Configuration module for package '{name}' lacks a dependencies dict.\")\n return None\n if not hasattr(module, \"config\") or not isinstance(module.config, Configuration):\n state.log.warn(f\"Configuration module for package '{name}' lacks a config object.\")\n return None\n else:\n module.config.addCustomTests(self.customTests)\n return module\n state.log.info(f\"Failed to import configuration for optional package '{name}'.\")",
"def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path"
]
| [
"0.70256954",
"0.69698614",
"0.66042155",
"0.65385836",
"0.6399915",
"0.6266867",
"0.626313",
"0.6226527",
"0.61270225",
"0.6109838",
"0.6023738",
"0.6023738",
"0.6023738",
"0.5986579",
"0.5986579",
"0.59618664",
"0.589924",
"0.5896467",
"0.5854116",
"0.58165705",
"0.57714254",
"0.57182014",
"0.57129866",
"0.5710257",
"0.5708953",
"0.57055557",
"0.5698429",
"0.5694562",
"0.56828797",
"0.5661679"
]
| 0.7629667 | 0 |
Map ``function`` across the values of ``dictionary``. | def map_values(function, dictionary):
return {k: function(dictionary[k]) for k in dictionary} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_values(fun, a_dict):\n return dict((k, fun(v)) for (k, v) in a_dict.items())",
"def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)",
"def filter_values(function, dictionary):\n return {k: v for k, v in dictionary.items() if function(v)}",
"def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)",
"def map_values_c(fun):\n return partial(map_values, fun)",
"def f(map, key):\n def decorator(function):\n map[key] = function\n return function\n return decorator",
"def iterate(d, fun): # type: (Dict, Callable[[Any, Any], None]) -> None\n for key, value in d.items():\n if isinstance(value, dict):\n DictUtil.iterate(value, fun)\n else:\n fun(key, value)",
"def map_dictionary(dictionary):\n mapping = dict()\n\n def add_item_to_mapping(path, k, v):\n mapping.update({join(path, k): v})\n\n loop_dictionary(\n dictionary,\n callback=add_item_to_mapping\n )\n return mapping",
"def map_dict(dictionary, transform):\n return dict(transform(k, v) for k, v in dictionary.items())",
"def mapfn(k, v):\n for row in v:\n # completar\n pass",
"def apply_mapping_dict(self, mdict, domain_taxon_set=None, range_taxon_set=None):\n if domain_taxon_set is None:\n domain_taxon_set = TaxonSet(mdict.keys())\n return self.apply_mapping_func(lambda x: mdict[x], domain_taxon_set=domain_taxon_set, range_taxon_set=range_taxon_set)",
"def map(self, function):\n pass",
"def walk_map(d: dict, func: FunctionType):\n out = {}\n for k, v in d.items():\n if isinstance(v, (dict, defaultdict)):\n out[k] = walk_map(v, func)\n else:\n out[k] = func(v)\n return out",
"def map_named(function: Callable[[str, Any], Any],\n val: Any,\n key: Optional[str] = \"\") -> Any:\n if isinstance(val, Mapping):\n return type(val)(\n **{k: map_named(function, v, key + \"/\" + k) for k, v in val.items()})\n elif isinstance(val, tuple) or isinstance(val, list):\n return type(val)(\n *\n [map_named(function, v, key + \"/\" + str(i)) for i, v in enumerate(val)])\n # check if it's a flax dataclass\n elif hasattr(val, \"__dataclass_fields__\"):\n classname = repr(val).split(\"(\")[0]\n return type(val)(**{\n k: map_named(function, v, f\"{key}/{classname}.{k}\")\n for k, v in val.__dataclass_fields__.items()\n })\n else:\n return function(key, val)",
"def map(s,dic):\n state=s.getstate()\n if not state in dic:raise Exception(\"the current state \"+str(state)+\" is not available to map to using the dictionary \"+str(dic))\n val=dic[state]\n if callable(val):\n return val()\n states=s.getstates()\n if val in states:\n return s.setstate(val)\n raise Exception(\"I dont know how to use this \"+str(state)+\" since it maps to a type of \"+str(type(val))+\" namely \"+str(val))",
"def apply_mapping_func(self, mfunc, domain_taxon_set, range_taxon_set=None):\n self.forward = {}\n self.reverse = {}\n self.domain_taxon_set = domain_taxon_set\n if range_taxon_set is None:\n self.range_taxon_set = TaxonSet()\n else:\n self.range_taxon_set = range_taxon_set\n for dt in self.domain_taxon_set:\n rt = mfunc(dt)\n if rt not in self.range_taxon_set:\n self.range_taxon_set.add(rt)\n self.forward[dt] = rt\n try:\n self.reverse[rt].add(dt)\n except KeyError:\n self.reverse[rt] = set([dt])",
"def applyFuncOnValues(self, func):\r\n self._value = func(self._value)",
"def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass",
"def map(self, function):\n return FunctionalWrapper(map(function, self.data))",
"def map(self, func):\n return _(map(func, self._))",
"def foreach(function):\n return partial(map, function)",
"def _map_config_values(config, fn):\n if isinstance(config, dict):\n return {key: _map_config_values(value, fn) for key, value in config.items()}\n elif isinstance(config, list):\n return [_map_config_values(elem, fn) for elem in config]\n else:\n return fn(config)",
"def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)",
"def convert(self, function=pointwise_mi):\n self.normalise()\n feat_prob = Counter()\n for feat_set in self.itervalues():\n for feat in feat_set:\n feat_prob[feat] += feat_set[feat]\n \n for feat_set in self.itervalues():\n code_prob = sum(feat_set.values())\n for feat in feat_set:\n feat_set[feat] = function(code_prob, feat_prob[feat], feat_set[feat])\n return self",
"def mapToDict(dictionary, key):\n return dictionary[key]",
"def with_calculated(funs):\n def with_calculated_funs(a_dict):\n return updated_with(a_dict, to_dict(funs)(a_dict))\n return with_calculated_funs",
"def map(keys, values) -> MapValue:\n return ops.Map(keys, values).to_expr()",
"def dict_operate(dict, key, value, operation=None):\n if key in dict and operation is not None:\n dict[key] = operation(dict[key], value)\n else:\n dict[key] = value",
"def apply_func(output, func):\n new_output = []\n for dict in output:\n mnemonic = copy.deepcopy(dict['mnemonic'])\n values = dict['values']\n new_values = func(values)\n new_output.append({'mnemonic': mnemonic, 'values': new_values})\n return new_output",
"def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)"
]
| [
"0.81647795",
"0.73186177",
"0.67919385",
"0.67103237",
"0.66901916",
"0.65380114",
"0.63903636",
"0.63513243",
"0.63053024",
"0.6285849",
"0.6283094",
"0.626183",
"0.62040246",
"0.6043741",
"0.6023191",
"0.60203594",
"0.60124683",
"0.6009012",
"0.59802103",
"0.5977911",
"0.5929177",
"0.58181554",
"0.58142114",
"0.58000255",
"0.5780818",
"0.5759522",
"0.5752582",
"0.57511723",
"0.5747817",
"0.56705904"
]
| 0.8729859 | 0 |
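A minimal usage sketch of the map_values helper documented in the record above, assuming standard Python 3; the sample dictionary and lambda are illustrative and not taken from the dataset.

def map_values(function, dictionary):
    return {k: function(dictionary[k]) for k in dictionary}

# Doubling every value while keeping the keys untouched.
prices = {"apple": 1.0, "pear": 2.5}
assert map_values(lambda v: v * 2, prices) == {"apple": 2.0, "pear": 5.0}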
Filter ``dictionary`` by its values using ``function``. | def filter_values(function, dictionary):
return {k: v for k, v in dictionary.items() if function(v)} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_dict(dictionary, pred):\n return dict((k, v) for k, v in dictionary.items() if pred(k, v))",
"def filter_keys(func, a_dict):\n return dict((k, v) for (k, v) in a_dict.items() if func(k))",
"def filter(self, func):\n self._sets.filter(key=func)",
"def filter_dict(fdict, mask):\n\n if fdict is None:\n fdict = dict()\n\n if mask is None:\n mask = []\n\n return {k: v for (k, v) in fdict.items() if k in mask}",
"def filter_by_val(j_dict: Dict, **kwargs: str) -> Dict:\n def _filter_dict(j_dict, f_by, f_elem):\n return {key: val for key, val in j_dict.items()\n if val[f_by] == f_elem}\n\n for key, val in kwargs.items():\n j_dict = _filter_dict(j_dict, key, val)\n\n return j_dict",
"def filter(self, filter_dict):\n pass",
"def _filter_from_dict(cls, nm, val):\n #Any necessary filtering place here.\n return val",
"def map_values(function, dictionary):\n return {k: function(dictionary[k]) for k in dictionary}",
"def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))",
"def filter_keys_c(func):\n return partial(filter_keys, func)",
"def filter(self, func: Callable[[Tuple[keyType, valueType]], Tuple[keyType, valueType]]) -> List[Tuple[keyType, valueType]]:\n result = []\n it = self.__iter__()\n while True:\n try:\n key, value = next(it)\n pair = (key, value)\n tmp = func(pair)\n if not (tmp is None):\n result.append(tmp)\n except StopIteration:\n break\n return result",
"def filter(self, dict):\n for (pos, hashKey) in enumerate(self._sequence):\n for (key, value) in dict.items():\n data = self.dictionary[hashKey]\n \n if not (data.has_key(key) and data[key].find(value) == 0):\n del self.dictionary[hashKey]\n self._sequence.pop(pos)",
"def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list",
"def map_values(fun, a_dict):\n return dict((k, fun(v)) for (k, v) in a_dict.items())",
"def filter(self, filter_dict):\n self.result = [x for x in self.result if all(str(x[y]) == z or (hasattr(x[y], \"__iter__\") and (z in str(x[y]) or any(z in str(d.values) for d in x[y] if isinstance(d, dict)))) for y,z in filter_dict.items())] \n\n return self",
"def filter_data(f):\n @functools.wraps(f, assigned=[])\n def wrapper(*args, **kwds):\n out = f(*args, **kwds)\n\n def _filter(obj):\n if isinstance(obj, list):\n new_list = []\n for o in obj:\n new_list.append(_filter(o))\n obj = new_list\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(k, str):\n obj[k.lower()] = _filter(v)\n return obj\n return _filter(out)\n return wrapper",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError",
"def dict_filter(d, keys, into=dict):\n \n if hasattr(keys, \"__call__\"):\n f = keys\n keys = filter(f, d.keys())\n return into(map(lambda k:(k,d[k]), keys))",
"def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)",
"def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))",
"def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata",
"def dict_value_filter(key, data, dfilter, logger):\n\n logger.info(u'dict_value_filter:{l}'.format(l=locals()))\n newdata = {}\n if isinstance(data, dict):\n for nextkey, nextdata in data.items():\n returned_data = dict_value_filter(nextkey, nextdata, dfilter,\n logger)\n if bool(returned_data):\n newdata[nextkey] = returned_data\n elif isinstance(data, list):\n logger.info('Processing List:{}'.format(data))\n\n for item in data:\n logger.info(u'Process list:{}'.format(data))\n if isinstance(item, dict):\n logger.info('Found a dictionary:{}'.format(item))\n logger.info('Calling dict_value_filter:{k},{d},{f}'\n ''.format(k=key,d=item, f=dfilter))\n returned_data = dict_value_filter(key, item, dfilter, logger)\n if bool(returned_data):\n newdata = returned_data\n elif dfilter in unicode(data):\n newdata = data\n else:\n logger.info(u'Skipping data entry:{d}'.format(d=data))\n\n return newdata",
"def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)",
"def filter(self, func=None, **kwargs):\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)",
"def filter(self, func=bool):\n return _(filter(func, self._))",
"def filter_kwargs(dict_to_filter, function_to_call):\n\n sig = inspect.signature(function_to_call)\n filter_keys = [param.name for param in sig.parameters.values() if (param.kind == param.POSITIONAL_OR_KEYWORD)]\n valid_args = {}\n invalid_args = {}\n\n for key in dict_to_filter:\n if key in filter_keys:\n valid_args[key] = dict_to_filter[key]\n else:\n invalid_args[key] = dict_to_filter[key]\n return valid_args, invalid_args",
"def filter_values(matrix, values):\n if values is None:\n return matrix\n res = TransitionMatrix(order=matrix.order)\n for prefix, probabilities in matrix.items():\n filtered = {suffix: probabilities[suffix] for suffix in probabilities if suffix in values}\n if filtered:\n res[prefix] = filtered\n return res",
"def get(list_of_dict, key, value):\n return filter(lambda dictionary: dictionary[key] == value, list_of_dict)",
"def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]",
"def winnow_by_keys(dct, keys=None, filter_func=None):\n has = {}\n has_not = {}\n\n for key in dct:\n key_passes_check = False\n if keys is not None:\n key_passes_check = key in keys\n elif filter_func is not None:\n key_passes_check = filter_func(key)\n\n if key_passes_check:\n has[key] = dct[key]\n else:\n has_not[key] = dct[key]\n\n return WinnowedResult(has, has_not)"
]
| [
"0.70143163",
"0.69534475",
"0.6837467",
"0.6674849",
"0.64855725",
"0.64559966",
"0.6397897",
"0.6368482",
"0.63502276",
"0.623713",
"0.6150504",
"0.61269593",
"0.61043084",
"0.60623217",
"0.6016967",
"0.59909564",
"0.5987634",
"0.596273",
"0.5946462",
"0.59055525",
"0.5864141",
"0.58620495",
"0.58234715",
"0.5804634",
"0.57795686",
"0.5769106",
"0.5729673",
"0.5702841",
"0.5655391",
"0.5635331"
]
| 0.86716884 | 0 |
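A short, illustrative sketch of the filter_values helper from the record above; the scores dictionary and the threshold are made-up values.

def filter_values(function, dictionary):
    return {k: v for k, v in dictionary.items() if function(v)}

# Keep only the entries whose value passes the predicate.
scores = {"alice": 91, "bob": 62, "carol": 78}
assert filter_values(lambda v: v >= 70, scores) == {"alice": 91, "carol": 78}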
Return a list ``a`` without the elements of ``b``. If a particular value is in ``a`` twice and in ``b`` once, then that value will appear once in the returned list. | def list_subtract(a, b):
a_only = list(a)
for x in b:
if x in a_only:
a_only.remove(x)
return a_only | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def difference(a, b):\r\n return list(set(b).difference(set(a)))",
"def difference(a, b):\n return list(filterfalse(lambda x: x in b, a))",
"def sublist(a, b):\n r = a[:]\n for i in b:\n r.remove(i)\n return r",
"def difference(a, b):\r\n c = [i for i in a + b if i not in a or i not in b]\r\n return c",
"def subtraction_list(a , b):\n\n c = [i for i in list_a if i not in list_b]\n\n return(c)",
"def listops_difference(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item not in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)",
"def listSubtract(alist,blist):\n result = []\n for item in alist:\n if item not in blist:\n result.append(item)\n return result",
"def remove_common(first: list, second: list):\n return list(set(first)-set(second))",
"def unIfInt(a, b):\n if len(intersect(a, b)) != 0:\n return (list(set(a).union(b)))",
"def extra_elem(a,b):\n \"*** YOUR CODE HERE ***\"\n return list( set(a).symmetric_difference(set(b)) )[0] # don't need to turn b into a set\n # the function will accept a list as an arg.",
"def union(a, b):\r\n return list(set(a) | set(b))",
"def union(a, b):\n return list(set(a) | set(b))",
"def union(a, b):\n return list(set(a) | set(b))",
"def equal_ignore_order(self, a, b):\n unmatched = list(b)\n for element in a:\n try:\n unmatched.remove(element)\n except ValueError:\n return False\n return not unmatched",
"def listops_union(list_a,list_b):\r\n\r\n retlist = list_a[:]\r\n for item in list_b: \r\n if item not in list_a:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)",
"def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist",
"def intersect(a, b):\r\n return list(set(a) & set(b))",
"def remove_l2_from_l1(l1, l2):\r\n return [element for element in l1 if element not in l2]",
"def diff(xs, ys):\n return [x for x in xs if x not in ys]",
"def remove_repeats(list1: List[int], list2: List[int]) -> List[int]:\n result = []\n for num in list2:\n if num not in list1:\n result.append(num)\n \n return result",
"def intersect(a, b):\n return list(set(a) & set(b))",
"def intersect(a,b):\n\treturn list(set(a) & set(b))",
"def intersect(a, b):\n return(list(set(a) & set(b)))",
"def second_method(array1, array2):\n\n result = [i for i in array1 if not i in array2 or array2.remove(i)]\n result += array2\n\n return result",
"def intersection(a, b):\n return list(set(a) & set(b))",
"def unique_list(\n l1: list,\n l2: list,\n ) -> list:\n\n l = list((set(l1) | set(l2)) - (set(l1) & set(l2)))\n\n return l",
"def lines(a, b):\n c = []\n\n a = a.split(\"\\n\")\n b = b.split(\"\\n\")\n\n for x in a:\n for y in b:\n if x == y: #Checks to see if item in both lists matches\n if not x in c: #Removes duplicates\n c.append(x) #Adds to new list\n return (c)",
"def _condense(a,b=None):\r\n\t\t\r\n\t\t# second is by default empty\r\n\t\tif b is None:\r\n\t\t\tb = []\r\n\t\t\r\n\t\t# add first into second\r\n\t\tfor i in a:\r\n\t\t\t\r\n\t\t\t# try to add onto all terms\r\n\t\t\tt = [i.add(j) for j in b]\r\n\t\t\t\r\n\t\t\t# check for match\r\n\t\t\tm = False\r\n\t\t\tfor n,j in enumerate(t):\r\n\t\t\t\tif j is not None:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# replace with combination\r\n\t\t\t\t\tb[n] = j\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# stop searching\r\n\t\t\t\t\tm = True\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t# otherwise append\r\n\t\t\tif not m:\r\n\t\t\t\tb.append(i)\r\n\t\t\t\r\n\t\t\t# remove zeroes\r\n\t\t\tzo = lambda x: 0 in x\r\n\t\t\tb = [i for i in b if not zo(i)]\r\n\t\t\t\r\n\t\treturn b",
"def venn(a,b):\n a = set(a)\n b = set(b)\n return map(list, (a.difference(b), a.intersection(b), b.difference(a)))",
"def _not_matching(values, sieve):\n return [val for val in values if val not in sieve]"
]
| [
"0.7454335",
"0.7405594",
"0.7392524",
"0.7338166",
"0.71582747",
"0.7101876",
"0.7096621",
"0.69358665",
"0.68041253",
"0.67892265",
"0.67726505",
"0.6708057",
"0.6708057",
"0.6624537",
"0.6611756",
"0.65195936",
"0.6506374",
"0.6492541",
"0.6486167",
"0.6474487",
"0.646777",
"0.64623755",
"0.6458556",
"0.6402355",
"0.6319352",
"0.62781644",
"0.6244596",
"0.6222635",
"0.61994976",
"0.61694604"
]
| 0.76899225 | 0 |
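A brief sketch showing the multiset behaviour described by the list_subtract record above; the input lists are illustrative.

def list_subtract(a, b):
    a_only = list(a)
    for x in b:
        if x in a_only:
            a_only.remove(x)
    return a_only

# Each occurrence in b removes at most one matching occurrence in a,
# so one of the two 2s survives.
assert list_subtract([1, 2, 2, 3], [2, 3]) == [1, 2]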
Reading an environment variable as text. | def env_var_line(key: str) -> str:
return str(os.environ.get(key) or "").strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_env(key: str) -> str:\n value = os.getenv(key)\n assert isinstance(value, str), (\n f\"the {key} environment variable must be set and a string, \" f\"{value=}\"\n )\n return value",
"def test_get_environment_string(self):\n pass",
"def getenv_string(setting, default=''):\n return os.environ.get(setting, default)",
"def windows_get_env_value(var_name: str) -> str:\n if var_name in os.environ.keys():\n return os.environ[var_name]",
"def getenv(device, variable_name):\n command = 'getenv \"%s\" \"%s\"' % (device.udid, variable_name)\n variable = _run_command(command)\n # The variable has an extra new line at the end, so remove it when returning\n return variable[:-1]",
"def get_envvar(name, silent=False):\n value = os.environ.get(name)\n if value is None:\n if not silent:\n raise RuntimeError(\n 'The environment variable %r is not set '\n 'and as such configuration could not be '\n 'loaded. Set this variable and make it '\n 'point to a configuration file' % name\n )\n else:\n return ''\n return value",
"def env(var):\n return os.environ[var]",
"def read_property(self, key: str) -> str:\n return self._env.read_property(key)",
"def stdin_get_value() -> str:\n stdin_value = sys.stdin.buffer.read()\n fd = io.BytesIO(stdin_value)\n try:\n coding, _ = tokenize.detect_encoding(fd.readline)\n fd.seek(0)\n return io.TextIOWrapper(fd, coding).read()\n except (LookupError, SyntaxError, UnicodeError):\n return stdin_value.decode(\"utf-8\")",
"def get_value(key:str):\n value = environ.get(key)\n if value == None or len(str(value)) == 0:\n raise ValueError('Missing env: '+key)\n return value",
"def GetEnvVariable(name):\n return os.environ.get(name)",
"def _env_var_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> str:\n args = node.value.split()\n\n # Check for a default value\n if len(args) > 1:\n return os.getenv(args[0], \" \".join(args[1:]))\n if args[0] in os.environ:\n return os.environ[args[0]]\n logger.error(\"Environment variable %s not defined\", node.value)\n raise XKNXException(node.value)",
"def getenv(space, var):\n e = os.environ.get(var)\n if e is None:\n return space.w_False\n return space.newstr(e)",
"def get_environment_variable(name):\n\n variable = None\n try:\n variable = os.environ[name]\n except KeyError:\n pass\n \n return variable",
"def getenv(self, key):\n return self._env[key]",
"def getenv(self, var):\n return os.environ[var]",
"def getenv(self, name):\n return self.environment[name]",
"def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)",
"def env(key, default=None, required=False):\n try:\n value = os.environ[key]\n return ast.literal_eval(value)\n except (SyntaxError, ValueError):\n return value\n except KeyError:\n if default or not required:\n return default\n raise ImproperlyConfigured(\n \"Missing required environment variable '%s'\" % key)",
"def test_unquoted(self):\n e = yaenv.core.EnvVar('key = value\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate",
"def read_conda_env(name=None):\n if name is None:\n name = get_conda_env_name()\n command = get_conda_bin() + ' env export -n ' + \\\n get_conda_env_name() + \" --no-builds\"\n env_str = os.popen(command).read()\n if env_str == '':\n error = 'Failed to read Anaconda environment using command: \"' + command + '\"'\n raise CondaError(error)\n return env_str",
"def environment_value(self, name):\n if not os.environ.has_key(name):\n return None\n return os.environ[name]",
"def test_single_quoted(self):\n e = yaenv.core.EnvVar(\"key = 'value'\\n\")\n assert e.key == 'key'\n assert e.value == 'value'\n assert not e._interpolate",
"def getenv_with_file(key: str, default: str = None) -> str:\n value = os.getenv(key)\n if value:\n return value\n filename = os.getenv(\"{}_FILE\".format(key))\n if filename:\n with open(filename) as f:\n return f.read()\n return default",
"def test_double_quoted(self):\n e = yaenv.core.EnvVar('key = \"value\"\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate",
"def env(key: str) -> Optional[Any]:\n return os.getenv(key)",
"def __getitem__(self, key):\n return os.environ[key]",
"def env_variable(self, name: str) -> Optional[str]:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"envVariable\", _args)\n return _ctx.execute_sync(Optional[str])",
"def get(self):\n self.value = os.getenv(self.name, self.default)\n return self.value",
"def get(self, key, default=None):\n value = os.environ.get(key)\n\n if value:\n self.logging.info(\"Got %s from environment.\" % key)\n self.logging.debug(value)\n return_val = value\n elif key in self._config.keys():\n self.logging.info(\"Got %s from config file.\" % key)\n self.logging.debug(value)\n return_val = self._config[key]\n else:\n return_val = default\n return return_val"
]
| [
"0.64903164",
"0.61682165",
"0.6142988",
"0.60816246",
"0.6054548",
"0.60263807",
"0.59471285",
"0.59415185",
"0.5887559",
"0.58568513",
"0.5842627",
"0.5826633",
"0.5815658",
"0.5800363",
"0.5800218",
"0.57895327",
"0.5763488",
"0.57547617",
"0.5748358",
"0.57209665",
"0.56679136",
"0.56620014",
"0.56551397",
"0.5643303",
"0.5637641",
"0.562527",
"0.5597838",
"0.5519821",
"0.5517795",
"0.5510159"
]
| 0.67881423 | 0 |
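A small usage sketch of env_var_line from the record above; SERVICE_NAME is a hypothetical variable set only for this demonstration.

import os

def env_var_line(key: str) -> str:
    return str(os.environ.get(key) or "").strip()

os.environ["SERVICE_NAME"] = "  billing  "       # hypothetical variable for the demo
assert env_var_line("SERVICE_NAME") == "billing"  # surrounding whitespace is stripped
assert env_var_line("MISSING_VAR") == ""          # unset variables come back as ""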
Reading an environment variable as int. | def env_var_int(key: str) -> int:
try:
return int(env_var_line(key))
except (ValueError, TypeError):
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval_env_as_integer(varname, standard_value) -> int:\n return int(os.getenv(varname, standard_value))",
"def get_envint(key, *default):\n return get_env(key, *default, coerce=_int)",
"def read_int():\n return int(sys.stdin.readline())",
"def read_config_int(config, section, item):\n value = read_config(config, section, item)\n if value is None:\n return value\n return int(value)",
"def _getint(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> int:\n try:\n return parser.getint(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read int value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be an integer\"\n ) from err",
"def _read_value(path):\n read_value = 0\n if not os.path.exists(path):\n # Path will generally only exist on a Raspberry Pi\n pass\n else:\n with open(path) as f:\n read_value = int(f.read())\n return read_value",
"def test_int(self, env: yaenv.Env):\n _val = env.int('INT_VAR')\n assert _val == 1 and type(_val) == int\n _val = env.int('MISSING', -2)\n assert _val == -2 and type(_val) == int\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.int('LIST_VAR')\n assert 'Invalid integer' in str(err.value)\n assert env.int('MISSING') is None",
"def getInt(self, key):\n self._check(key)\n value, flag = self.__config.value(key).toInt()\n if flag:\n return value\n else:\n raise ValueError(\"ConfigManager can't get key '%s' as int\" % key)",
"def get_int() -> int:\n line = input().strip()\n return int(line)",
"def config_get_int(section, option):\n return __CONFIG.getint(section, option)",
"def getint(self, key):\n try:\n return self.parser.getint(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read int value in config file for key '{}' and string '{}'. Must be an integer\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err",
"def to_int(variable):\n try:\n return int(variable)\n except ValueError:\n return variable",
"def try_load_int (fichier,variable):\n\ttry:\n\t\tm = persistance.get_propriete (fichier, variable)\n\t\tk = int (m)\n\t\treturn k\n\texcept ValueError:\n\t\traise persistance.ValeurInvalide (fichier,variable)",
"def read_int(data):\n s_type = \"=%s\" % get_type(\"int\")\n return struct.unpack(s_type, data.read(4))[0]",
"def read_int(\n file: BinaryIO, endianness: Text = \"little\", signed: bool = True\n) -> int:\n data = file.read(n=sys.int_info.sizeof_digit)\n if not data:\n return None\n data = int.from_bytes(\n bytes=data, byteorder=endianness, signed=signed\n )\n return data",
"def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))",
"def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0",
"def get_int():\n\twhile True:\n\t\ttry:\n\t\t\tX = int(raw_input())\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint \"Could not convert input to integer\"\n\t\t\tcontinue\n\treturn X",
"def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt32()",
"def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')",
"def getint(self, strcommand):\n result = ct.c_longlong()\n command = ct.c_wchar_p(strcommand)\n self.lib.AT_GetInt(self.AT_H, command, ct.addressof(result))\n return result.value",
"def __get_int_value_from_line(self, line, value):\n # TODO: catch exceptions\n value_str = line.split(value)[1]\n if value_str.startswith('='):\n value_str = value_str[1:]\n value_str = value_str.split(';')[0]\n return int(value_str)",
"def read_int(self):\n return self.bits.read(32).intle",
"def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)",
"def get_by_name_as_int(cls, name, default=None):\n try:\n return int(Configuration.get_by_name(name))\n except:\n return default",
"def read_int(self, size=4, location=None):\n return self.read_ints(size=size, location=location)[0]",
"def byte_to_int(single_byte: bytes) -> int:\n shift = 0\n result = 0\n if single_byte == b\"\" or single_byte == \"\":\n raise EOFError(\"Unexpected EOF while reading varint\")\n i = ord(single_byte)\n result |= (i & 0x7f) << shift\n return result",
"def getint(self, section, option):\n return int(self.get(section, option))",
"def read(reader: BitStreamReader, _index: int) -> int:\n\n return reader.readVarInt()"
]
| [
"0.8005265",
"0.7366795",
"0.6973604",
"0.66420656",
"0.64328814",
"0.6258224",
"0.61605865",
"0.6140184",
"0.60856193",
"0.60836726",
"0.6055936",
"0.59444773",
"0.5934205",
"0.58919984",
"0.57885367",
"0.5759262",
"0.5757238",
"0.5685826",
"0.5665112",
"0.5640167",
"0.5631339",
"0.56141293",
"0.5603056",
"0.5597917",
"0.55876654",
"0.5579612",
"0.5561713",
"0.55306005",
"0.55165595",
"0.5510607"
]
| 0.7908468 | 1 |
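A sketch of env_var_int from the record above, restating env_var_line so the snippet is self-contained; WORKER_COUNT and BAD_COUNT are hypothetical variables.

import os

def env_var_line(key: str) -> str:
    return str(os.environ.get(key) or "").strip()

def env_var_int(key: str) -> int:
    try:
        return int(env_var_line(key))
    except (ValueError, TypeError):
        return 0

os.environ["WORKER_COUNT"] = "4"              # hypothetical variable for the demo
os.environ["BAD_COUNT"] = "not-a-number"      # hypothetical malformed value
assert env_var_int("WORKER_COUNT") == 4
assert env_var_int("BAD_COUNT") == 0          # malformed or unset values fall back to 0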
Reading an environment variable as float. | def env_var_float(key: str) -> float:
try:
return float(env_var_line(key))
except (ValueError, TypeError):
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval_env_as_float(varname, standard_value) -> float:\n return float(os.getenv(varname, standard_value))",
"def get_envfloat(key, *default):\n return get_env(key, *default, coerce=_float)",
"def getFloat(self, key):\n self._check(key)\n value, flag = self.__config.value(key).toDouble()\n if flag:\n return value\n else:\n raise ValueError(\"ConfigManager can't get key '%s' as float\" % key)",
"def config_get_float(section, option):\n return __CONFIG.getfloat(section, option)",
"def readFloat(self) -> float:\n return self._unpack('!f', 4)",
"def read_float(self):\n return self._packers[\"f\"].unpack(self.read(4))[0]",
"def test_float(self, env: yaenv.Env):\n _val = env.float('FLOAT_VAR')\n assert _val == 10.0 and type(_val) == float\n _val = env.float('MISSING', -3.1)\n assert _val == -3.1 and type(_val) == float\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.float('LIST_VAR')\n assert 'Invalid numerical' in str(err.value)\n assert env.float('MISSING') is None",
"def read_float(v):\n if v.strip() == '':\n return 0.\n try:\n return float(v)\n except ValueError:\n # ENDF6 may omit the e for exponent\n return float(v[0] + v[1:].replace('+', 'e+').replace('-', 'e-')) # don't replace leading negative sign",
"def test_float_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float alpha = -0.5432\")\n assert bb._var == {\"alpha\": -0.5432}",
"def getfloat(self, strcommand):\n result = ct.c_double()\n command = ct.c_wchar_p(strcommand)\n self.lib.AT_GetFloat(self.AT_H, command, ct.addressof(result))\n return result.value",
"def getfloat(self, section, option):\n return float(self.get(section, option))",
"def get_float(self, prompt=\"> \"):\n\t\twhile True:\n\t\t\tans = raw_input(prompt)\n\t\t\ttry: \t\n\t\t\t\tans = float(ans)\n\t\t\t\treturn ans\n\t\t\texcept ValueError:\n\t\t\t\tif ans == \"quit\": quit()\n\t\t\t\telse: print \"Please enter a number using decimal notation.\"",
"def get_envdecimal(key, *default):\n return get_env(key, *default, coerce=_decimal)",
"def getfloat(self, section, option, default=None):\r\n return self.get(section, option, type=float, default=default)",
"def test_float_exponent_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float alpha = -9.54e-3\")\n assert bb._var == {\"alpha\": -9.54e-3}",
"def get_float(self, sect, opt):\r\n vstr = self.get_safe(sect, opt)\r\n try:\r\n return float(vstr)\r\n except ValueError:\r\n return 0.0",
"def _parse_env_value(val):\n if val.lower() == \"false\":\n return False\n elif val.lower() == \"true\":\n return True\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return val",
"def valf(node: md.Document) -> float:\n try:\n return float(val(node))\n except ValueError:\n return None",
"def read_float(self, process_handle: int, address: int):\n self.__bufferSize = 4\n value = self.__read_bytes(process_handle, address)\n return None if value is None else unpack('<f', bytearray(value))",
"def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":",
"def ReadFloat(self, endian=\"<\"):\n return self.unpack(\"%sf\" % endian, 4)",
"def value(self):\n float_str = first_token(self._node).spelling\n\n # Remove any C-specific suffix (f, F, l, L) so we can use Python's\n # float constructor to parse the string.\n float_str = re.sub(r'^(.*)[fFlL]$', r'\\1', float_str)\n\n return float(float_str)",
"def read_float(data):\n s_type = \"=%s\" % get_type(\"float\")\n return struct.unpack(s_type, data.read(4))[0]",
"def getFloat(self, address: ghidra.program.model.address.Address) -> float:\n ...",
"def float_from_string(data):\n return float(maybe_number(data))",
"def getfloat(self, option, default=None):\n\t\treturn self._get_raw(option, 'float', default)",
"def getFloat(self, section, option, default=0):\n return self.get(section, option, default, float)",
"def getSetFloat(self, key: str, default: float | None = None) -> float:\n value = self.parsedConfig.getfloat(key, default)\n self.parsedConfig[key] = str(value)\n return value",
"def ffloat(string):\n try:\n return float(string.strip())\n except:\n return 0",
"def to_float(x, key):\n x = x.strip()\n if not x or x in ('NA', 'n/a'):\n return None\n if '.' in x:\n # There are '.'s, so commas are placeholders\n x = x.replace(',', '') \n if x.endswith('ft'):\n scale = 0.3048\n x = x[:-2].strip()\n else:\n scale = 1 \n try:\n return scale * float(x)\n except:\n logging.warn('Could not convert %s value %s to float', key, x)\n return None"
]
| [
"0.8250072",
"0.7407218",
"0.65869987",
"0.64011014",
"0.63798887",
"0.6249607",
"0.62252533",
"0.6190475",
"0.6136223",
"0.61103517",
"0.59694827",
"0.5918007",
"0.5894661",
"0.58787096",
"0.5845959",
"0.5840988",
"0.58388215",
"0.58197176",
"0.5793032",
"0.5783338",
"0.5772726",
"0.5750477",
"0.57104677",
"0.57102066",
"0.56849396",
"0.56694627",
"0.5667593",
"0.5628994",
"0.562617",
"0.56238383"
]
| 0.80970025 | 1 |
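The float variant behaves the same way; note that the fallback is the integer 0 rather than 0.0, which still compares equal numerically. TIMEOUT_SECONDS is a hypothetical variable used only for this sketch.

import os

def env_var_line(key: str) -> str:
    return str(os.environ.get(key) or "").strip()

def env_var_float(key: str) -> float:
    try:
        return float(env_var_line(key))
    except (ValueError, TypeError):
        return 0

os.environ["TIMEOUT_SECONDS"] = "2.5"        # hypothetical variable for the demo
assert env_var_float("TIMEOUT_SECONDS") == 2.5
assert env_var_float("UNSET_TIMEOUT") == 0   # falls back to 0 (an int) when unset or malformed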
Reading an environment variable as a list; the source line should be divided by commas. | def env_var_list(key: str) -> list:
return list(
filter(
None, map(str.strip, env_var_line(key).split(","))
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_list(name, default=NO_ARGUMENT, separator=\",\"):\n value = os.environ.get(name)\n if value is None:\n if default is NO_ARGUMENT:\n return []\n else:\n return default\n return [v.strip() for v in value.split(separator) if v.strip()]",
"def get_envlist(key, *default, **kwargs):\n separator = kwargs.get('separator', ' ')\n return get_env(key, *default, coerce=lambda x: x.split(separator))",
"def _set_list(name, value, context):\n\n if name in os.environ:\n context[name] = os.environ.get(name).lower().split(\",\")\n\n _set_default(name, value, context)",
"def read_variables(var_or_list):\n session = ph.get_session()\n return session.run(var_or_list)",
"def GetListVariable(self, name):\n var = self._makefile.variables.get(name, expand=True)[2]\n if not var:\n return []\n return var.resolvesplit(self._makefile, self._makefile.variables)",
"def environments(self):\n env_txt = self.config[\"tox\"][\"envlist\"]\n env_lst_raw = env_txt.strip().replace(\"\\n\",\",\").split(\",\")\n env_lst = [x.strip() for x in env_lst_raw if x != \"\"]\n return env_lst",
"def conf_load_par_list(par_def):\n par_def = par_def[1:-1].split(',')\n par_list = list()\n for p in par_def:\n par_list.append(p.strip())\n return par_list",
"def SourceEnvironment(script, whitelist, ifs=',', env=None):\n dump_script = ['source \"%s\" >/dev/null' % script,\n 'IFS=\"%s\"' % ifs]\n for var in whitelist:\n dump_script.append(\n '[[ \"${%(var)s+set}\" == \"set\" ]] && echo %(var)s=\"${%(var)s[*]}\"'\n % {'var': var})\n dump_script.append('exit 0')\n\n if env is None:\n env = {}\n elif env is True:\n env = None\n output = cros_build_lib.RunCommand(['bash'], env=env, redirect_stdout=True,\n redirect_stderr=True, print_cmd=False,\n input='\\n'.join(dump_script)).output\n return cros_build_lib.LoadKeyValueFile(cStringIO.StringIO(output))",
"def env_variables(self) -> list[\"EnvVariable\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"envVariables\", _args)\n _ctx = EnvVariable(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[EnvVariable])",
"def environments(self):\n envs = self.config[\"tox\"][\"envlist\"]\n #result = re.split(\"[^a-zA-Z0-9]\", envs)\n result = re.split(r'\\n| ,|,', envs)\n #print ([string for string in result if string != \"\"])\n result = (([string.strip() for string in result if string != \"\"]))\n print(list(dict.fromkeys(result)))\n return ((list(dict.fromkeys(result))))",
"def read_from_art_input_file(path_to_test_dir):\n\tpath_to_test_bart = path_to_test_dir + \"/bart.sh\"\n\tf=open(path_to_test_bart)\n\tstring=f.read()\n\tpattern = \"([#\\n])setenv[\\s]+Central_Atom[\\s]+([1-9]+)\"\n\t#pattern #setenv Central_Atom 1\n\tmatch = re.search(pattern, string)\n\tif match.group(1) == \"#\":\n\t\treturn [1]\n\telif match.group(1) == \"\\n\":\n\t\treturn [int(match.group(2))]",
"def load_input(input_name):\n with open(input_name) as input_file:\n input_list = list(map(int,input_file.readline().split(\",\")))\n return input_list",
"def test_list(self, env: yaenv.Env):\n _val = env.list('LIST_VAR', separator=':')\n _expect = ['item1', 'item2']\n assert _val == _expect and type(_val) == list\n _expect.append('item3')\n _val = env.list('MISSING', _expect)\n assert _val == _expect and type(_val) == list\n assert env.list('MISSING') is None",
"def retrieve_variables(content):\n variables = []\n in_var_section = False\n for line in content.splitlines():\n #print line\n if in_var_section:\n var_def = re.split(' +', line)\n if len(var_def) > 1:\n #print var_def[0], ':', var_def[1]\n var_name = var_def[0]\n def_value = var_def[1]\n if not def_value.startswith('%'): #not environment variable which would be directly passed to robot\n variables.append([var_name.strip('${').strip('}'), def_value])\n if '*** Variables ***' in line:\n in_var_section = True\n elif in_var_section and '*** ' in line:\n #end of Variables section\n break\n return variables",
"def _load_input() -> List[List[int]]:\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), INPUT_FILE)\n f = open(filepath, 'r')\n data = f.read()\n f.close()\n\n raw_input = data.strip().split('\\n')\n input = [list(ri) for ri in raw_input]\n return [[int(i) for i in line] for line in input]",
"def getlist(x, y):\n return get(x, y).split(',')",
"def load_file_lines(option_value):\n if not hasattr(option_value, 'read'):\n raise IncompetentDeveloperError(\"Input type must be a file object.\")\n \n return [line.strip() for line in option_value]",
"def values(line):\n return [v.strip() or None for v in text(line).split(',')]",
"def stdin_get_lines() -> list[str]:\n return list(io.StringIO(stdin_get_value()))",
"def get_by_name_as_list(cls, name, token=','):\n config = Configuration.get_by_name(name) or []\n if config:\n return [item.strip() for item in config.split(token)]\n else:\n return config",
"def _parse(self, inval):\n regex = re.compile(r'^os\\.environ\\[(.*)\\]$')\n for val in inval:\n if val is None:\n continue\n # split on \\n\n cmd = val.split('\\n')\n for v2 in cmd:\n if not v2:\n continue\n dict_call, pth = v2.split(' = ')\n m = re.match(regex, dict_call)\n if m:\n key = m.groups()[0]\n self.env[key] = pth",
"def read_list(fname):\n with open(fname) as handle:\n items = [line.strip() for line in handle]\n return items",
"def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args",
"def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()",
"def parse_list_str(setting_str):\n return re.split('\\s*,\\s*', setting_str)",
"def load_stop_list():\n stop_list = []\n with open(STOP_LIST, \"r\") as f:\n lines = f.readlines()\n stop_list = [word.strip() for word in lines]\n return stop_list",
"def input2strlist_nomapfile(invar):\n \n str_list = None\n if type(invar) is str:\n if invar.startswith('[') and invar.endswith(']'):\n str_list = [f.strip(' \\'\\\"') for f in invar.strip('[]').split(',')]\n else:\n str_list = [invar.strip(' \\'\\\"')]\n elif type(invar) is list:\n str_list = [str(f).strip(' \\'\\\"') for f in invar]\n else:\n raise TypeError('input2strlist: Type '+str(type(invar))+' unknown!')\n return str_list",
"def read_input(fname=\"day11.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]",
"def generateEnvList( self, index ):\n EnvList = [ \n (\"GLOBUS_DUROC_SUBJOB_INDEX\", \"%d\" % index),\n (\"LD_LIBRARY_PATH\", \"/usr/local/globus/globus-3.2/lib/\") \n ]\n return EnvList",
"def read_input(fname=\"day05.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]"
]
| [
"0.71439314",
"0.67040265",
"0.61332095",
"0.60854524",
"0.60168445",
"0.5877769",
"0.58456564",
"0.57908124",
"0.5651809",
"0.5635019",
"0.5613625",
"0.56000316",
"0.554496",
"0.5539179",
"0.5369789",
"0.53643876",
"0.53479964",
"0.5345841",
"0.5324846",
"0.52787125",
"0.5277821",
"0.5267666",
"0.52465475",
"0.52442443",
"0.5238046",
"0.523425",
"0.52118194",
"0.51786774",
"0.51692694",
"0.5162002"
]
| 0.73379016 | 0 |
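A sketch of env_var_list from the record above showing how blank items are dropped after the comma split; ALLOWED_HOSTS is a hypothetical variable.

import os

def env_var_line(key: str) -> str:
    return str(os.environ.get(key) or "").strip()

def env_var_list(key: str) -> list:
    return list(
        filter(
            None, map(str.strip, env_var_line(key).split(","))
        )
    )

os.environ["ALLOWED_HOSTS"] = "api.local, web.local,,  cache.local "  # hypothetical
assert env_var_list("ALLOWED_HOSTS") == ["api.local", "web.local", "cache.local"]
assert env_var_list("UNSET_LIST") == []   # unset or empty values give an empty list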
Returns the Planck function for radiation from a blackbody at temperature T (K) at wavelength(s) wave, given in Angstrom. Returns radiance in cgs units. | def blackbody( wave, T, waveunit='Angstrom' ):
if waveunit=='Angstrom':
# convert wavelength from angstroms to cm
wave = wave / 1e10 * 100.
elif waveunit=='nm':
        # convert wavelength from nm to cm
wave = wave / 1e9 * 100.
return( ((2 * h * c* c)/wave**5 ) / (exp(h*c/(wave*k*T))-1) ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Planck(T, wav):\n\twav_cm=wav*1.e-7 #convert wavelengths from nm to cm.\n\tc=2.99792e10 #speed of light, in cm/s\n\th=6.62607e-27#Planck constant, in erg*s\n\tkb=1.38065e-16#Boltzmann constant, in erg/K\n\t\n\timport numpy as np\n\tresult_cm=(2.*h*c**2./wav_cm**5.)*1./(np.exp(h*c/(wav_cm*kb*T))-1) #ergs/cm^3/s/steradian \n\t#Will return RunTime warnings for extremal values, which occur at these wavelengths. \n\tresult=result_cm*1.e-7 #convert to units of ergs/cm^2/nm/s/steradian \n\treturn result #result is in units of ",
"def planck_w(lam, T):\n return ((2*h*c**2)/(lam**5))*(1./(np.exp((h*c)/(lam*k*T))-1))",
"def planckian(temp, wavelength):\n if wavelength==560: return 100.0\n if temp<60: temp=60 # For simplicity, in very low temperature\n num = wavelength**(-5)\n try:\n v=num / (math.exp(0.0143877687750393/(wavelength*(10**(-9))*temp)) - 1)\n except:\n print(temp)\n print(wavelength)\n raise ValueError\n v2=(560.0**(-5)) / (math.exp(0.0143877687750393/(560.0*(10**(-9))*temp)) - 1)\n return v*100.0/v2",
"def planck_f(nu, T):\n return ((2*h*nu**3)/(c**2))*(1./(np.exp((h*nu)/(k*T))-1))",
"def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda",
"def fried_parameter_cm(wavelength,arcseconds_of_seeing_500nm=1.,zenith_angle_deg = 0.):\n r0_500nm_cm = (500e-9/(arcseconds_of_seeing_500nm*(np.pi/(180*3600))))*100\n k = r0_500nm_cm/(500e-9)**(6./5)\n r00 = k*wavelength**(6./5.)\n zenith_angle_rad = np.radians(zenith_angle_deg)\n r0z = r00 * np.cos(zenith_angle_rad)**(3/5.) #p60 DFB POI\n return r0z",
"def RK44_family(w):\n from sympy import Rational\n one = Rational(1,1)\n\n A=snp.array([[0,0,0,0],[one/2,0,0,0],[one/2-one/(6*w),one/(6*w),0,0],\n [0,one-3*w,3*w,0]])\n b=snp.array([one/6,2*one/3-w,w,one/6])\n return ExplicitRungeKuttaMethod(A,b)",
"def rad(tx,K,w,e,T0,Vo,P):\r\n\r\n M=2*np.pi*(tx-T0)/P #Mean anomaly\r\n E=np.pi\r\n for j in range(0,25):\r\n E=(M-e*(E*np.cos(E)-np.sin(E)))/(1-e*np.cos(E))\r\n th=2*np.arctan(((1+e)/(1-e))**0.5*np.tan(E/2))\r\n return K*(np.cos(th+w)+e*np.cos(w))+Vo",
"def thermal_rad_func(ts_bt, k1, k2):\n thermal_rad = np.copy(ts_bt).astype(np.float64)\n np.reciprocal(thermal_rad, out=thermal_rad)\n thermal_rad *= k2\n np.exp(thermal_rad, out=thermal_rad)\n thermal_rad -= 1.0\n np.reciprocal(thermal_rad, out=thermal_rad)\n thermal_rad *= k1\n return thermal_rad.astype(np.float32)",
"def distribution_planck_lambda(wavelength=1,temperature=1, units=SI,printA=False):\n\n var = sy.var('pi h c l k t')\n par = np.pi, units['h'], units['c'], wavelength, units['k'], temperature\n\n y = ( 8 * pi * h * c ) / l**5 / ( sy.exp(h*c/l/k/t) - 1 )\n\n return dic_result(var,par,y)",
"def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))",
"def graphite_cracking_rate_Ai2020(T_dim):\n k_cr = 3.9e-20\n Eac_cr = 0 # to be implemented\n arrhenius = np.exp(Eac_cr / pybamm.constants.R * (1 / T_dim - 1 / 298.15))\n return k_cr * arrhenius",
"def evaluate(self, wavelength):\n micron = wavelength.to(u.micron).value\n x = 1 / micron\n optical_indx = np.where(np.logical_and(0.63 <= micron, micron <= 2.20))\n ir_indx = np.where(np.logical_and(0.12 <= micron, micron <= 0.63))\n x = np.asarray(x)\n if x.ndim == 0:\n x = x[None]\n k = np.empty(len(x))\n k[optical_indx] = 2.659 * (-1.857 + 1.040 * x) + self.Rv\n k[ir_indx] = 2.659 * (-2.156 + 1.509 * x - 0.198 * x**2 + 0.011 * x**3) + self.Rv\n return k",
"def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave",
"def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k",
"def planck(\n wavel_points: np.ndarray, temperature: float, scaling: float\n ) -> np.ndarray:\n\n planck_1 = (\n 2.0 * constants.PLANCK * constants.LIGHT**2 / (1e-6 * wavel_points) ** 5\n )\n\n planck_2 = (\n np.exp(\n constants.PLANCK\n * constants.LIGHT\n / (1e-6 * wavel_points * constants.BOLTZMANN * temperature)\n )\n - 1.0\n )\n\n return 1e-6 * math.pi * scaling * planck_1 / planck_2 # (W m-2 um-1)",
"def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc",
"def curly_F_tau(Teff, tau):\n\n return 2*np.pi*(trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, t-tau), tau, 20, 5000)-trapezoidal(lambda t: integrated_planck(Teff*(0.5+ 3/4*t)**(1/4))*sc.expn(2, tau-t), 0, tau, 5000))",
"def CircadianRythme(t,initial_conditions) :\n#-----------------------\n# PARAMETERS IMPORTATION\n#-----------------------\n\tfichier = 'param.csv'\n\tparam = readparam(fichier, 1)\n\n#-----------------------\n# Initial conditions : \n#-----------------------\n\n\t# mRNAs of per, Cry and Bmal : \n\tMp = initial_conditions[0]\n\tMc = initial_conditions [1]\n\tMb = initial_conditions[2]\n\n\t# Phosporylated and non-phosphorylated proteins PER\n\t# and Cry in the cytosol : \n\n\tPc = initial_conditions[3]\n\tCc = initial_conditions[4]\n\tPcp = initial_conditions[5]\n\tCcp = initial_conditions[6]\n\n\t# Phosporylated and non-phosphorylated PER- Cry complexe\n\t# in the cytosol and nucleus : \n\n\tPCc = initial_conditions[7]\n\tPcn = initial_conditions[8]\n\tPCcp = initial_conditions[9]\n\tPCnp = initial_conditions[10]\n\n\t# Phosphorylated and non-phosphorylated protein BMAL1 in\n\t# the cytosol and nucleus : \n\n\tBc = initial_conditions[11]\n\tBcp = initial_conditions[12]\n\tBn = initial_conditions[13]\n\tBnp = initial_conditions[14]\n\n\t# Inactive complex between PER-CRY and CLOCK-BMAL1 in \n\t# nucleus : \n\n\tIn = initial_conditions[15]\n\n#--------------\n# Parameters : \n#--------------\n\n\n\t# Rate constants for modification : \n\n\tk1 = param['k1']\n\tk2 = param['k2']\n\tk3 = param['k3']\n\tk4 = param['k4']\n\tk5 = param['k5']\n\tk6 = param['k6']\n\tk7 = param['k7']\n\tk8 = param['k8']\n\n\t# Activation constant\n\n\tKAP = param['KAP']\n\tKAC = param['KAC']\n\tKIB = param['KIB']\n\n\t#Nonspecific degradation rate constant\n\n\tkdmb = param['kdmb']\n\tkdmc = param['kdmc']\n\tkdmp = param['kdmp']\n\tkdnc = param['kdnc']\n\tkdn = param['kdn']\n\n\t# Michaelis constant : \n\n\tKd = param['Kd']\n\tKdp = param['Kdp']\n\tKp = param['Kp']\n\tKmB = param['KmB']\n\tKmC = param['KmC']\n\tKmP = param['KmP']\n\n\t#Rate constant for synthesis : \n\n\tkstot = param['kstot']\n\tksB = param['ksB']\n\tksC = param['ksC']\n\tksP = param['ksP']\n\n\t# Degree of cooperativity : \n\n\tn = param['n']\n\tm = param['m']\n\n\t#Phosphorylation rate : \n\n\tVphos = param['Vphos']\n\t#Maximum Rate : \n\n\tV1B = param['V1B']\n\tV1C = param['V1C']\n\tV1P = param['V1P']\n\tV1PC = param['V1PC']\n\tV2B = param['V2B']\n\tV2C = param['V2C']\n\tV2P = param['V2P']\n\tV2PC = param['V2PC']\n\tV3B = param['V3B']\n\tV3PC = param['V3PC']\n\tV4B = param['V4B']\n\tV4PC = param['V4PC']\n\n\t#Maximum rate of degradation\n\n\tvndBC = param['vndBC']\n\tvndBN = param['vndBN']\n\tvndCC = param['vndCC']\n\tvndIN = param['vndIN']\n\tvndPC = param['vndPC']\n\tvndPCC = param['vndPCC']\n\tvndPCN = param['vndPCN']\n\tvnmB = param['vnmB']\n\tvnmC = param['vnmC']\n\tvnmP = param['vnmP']\n\n\t# Maximum rate of synthesis/transcription : \n\n\tvnsTot = param['vnsTot']\n\tvnsB = param['vnsB']\n\tvnsC = param['vnsC']\n\tvnsP = param['vnsP']\n\n#--------------------------\n# Kinetic equations : \n#--------------------------\n\n\t# mRNAs of per, Cry and Bmal : \n\n\tdMp = vnsP * Bn**n/(KAP**n+Bn**n) - vnmP * Mp/(KmP+Mp) - kdmp*Mp\n\tdMc = vnsC * Bn**n/(KAC**n+Bn**n) - vnmC * Mc/(KmC + Mc) - kdmc*Mc\n\tdMb = vnsB * KIB**m/(KIB**m+Bn**m) - vnmB * Mb/(KmB + Mb) - kdmb*Mb\n\n\t#Phosphorylated and non-phosphorylated proteins PER and CRY in the cytosol : \n\n\tdPc = ksP * Mp - V1P*Pc/(Kp+Pc) + V2P * Pcp/(Kdp + Pcp) + k4 * PCc - k3 * Pc * Cc - kdn * Pc\n\tdCc = ksC * Mc - V1C * Cc / (Kp +Cc) + V2C * Ccp/(Kdp + Ccp) + k4 * PCc - k3 * Pc * Cc - kdnc * Cc\n\tdPcp = V1P * Pc/(Kp + Pc) - V2P * Pcp/(Kdp + Pcp) - vndPC * Pcp/(Kp+Pcp) - kdn * Pcp\n\tdCcp = V1C * 
Cc/(Kp+Cc) - V2C * Ccp/(Kdp + Ccp) - vndCC * Ccp/(Kd + Ccp) - kdn * Ccp\n\n\t# Phosphorylated and non-phosphorylated PER-CRY complex in cytosom and nucleus : \n\n\tdPCc = -V1PC * PCc/(Kp+PCc) + V2PC * PCcp/(Kdp + PCcp) - k4 * PCc + k3 * Pc * Cc + k2 * Pcn - k1 * PCc - kdn * PCc \n\tdPCn = -V3PC * Pcn/(Kp+Pcn) + V4PC * PCnp/(Kdp+PCnp) - k2*Pcn + k1*PCc - k7 * Bn * Pcn + k8 * In - kdn * Pcn\n\tdPCcp = V1PC * PCc/(Kp+PCc) - V2PC * PCcp/(Kdp + PCcp) - vndPCC * PCcp/(Kd + PCcp) - kdn * PCcp\n\tdPCnp = V3PC * Pcn/(Kp+Pcn) - V4PC * PCnp/(Kdp + PCnp) - vndPCN * PCnp/(Kd + PCnp) - kdn * PCnp\n\n\t# Phosphorylated and non-phosphorylated protein BMAL1 in the cytosol and nucleus\n\tdBc = KIB * Mb - V1B * Bc/(Kp+Bc) + V2B * Bcp/(Kdp + Bcp) - k5*Bc + k6*Bc - kdn*Bc\n\tdBcp = V1B * Bc/(Kp + Bc) - V2B * Bcp/(Kdp + Bcp) - vndBC * Bcp/(Kd + Bcp) - kdn*Bcp\n\tdBn = -V3B * Bn/(Kp+Bn) - V4B * Bnp/(Kdp+Bnp) + k5*Bc - k6 * Bn - k7 * Bn * Pcn + k8 * In - kdn*Bn\n\tdBnp = V3B*Bn/(Kp+Bn) - V4B * Bnp/(Kdp + Bnp) - vndBN * Bnp/(Kd + Bnp) - kdn * Bnp\n\n\t#Inactive complex between PER–CRY and CLOCK–BMAL1 in nucleus :\n\tdIn = -k8 * In + k7 * Bn * Pcn -vndIN * In/(Kd + In) - kdn*In\n\t\n\tdydt = np.array([dMp, dMc, dMb, dPc, dCc, dPcp, dCcp, dPCc, dPCn, dPCcp, dPCnp, dBc, dBcp, dBn, dBnp, dIn])\n\treturn dydt.reshape(len(dydt),1)",
"def planck_B_nu(freq, T):\n import numpy as np\n from astropy import units as u\n from astropy import constants as c\n\n if isinstance(T, u.quantity.Quantity):\n use_units = True\n else:\n T = T * u.K\n use_units = False\n\n if not isinstance(freq, u.quantity.Quantity):\n freq *= u.Hz\n\n T = np.array(T.value, ndmin=1) * T.unit\n freq = np.array(freq.value, ndmin=1) * freq.unit\n\n f_ov_T = freq[np.newaxis, :] / T[:, np.newaxis]\n mx = np.floor(np.log(np.finfo(f_ov_T.ravel()[0].value).max))\n exp = np.minimum(f_ov_T * c.h / c.k_B, mx)\n exp = np.maximum(exp, -mx)\n\n output = 2 * c.h * freq**3 / c.c**2 / (np.exp(exp) - 1.0) / u.sr\n\n cgsunit = 'erg/(s*sr*cm**2*Hz)'\n if use_units:\n return output.to(cgsunit).squeeze()\n else:\n return output.to(cgsunit).value.squeeze()",
"def speedOfSound(gamma, R, T):\n\n a = np.sqrt(gamma*R*T)\n\n return a",
"def PlankFunction(wavelen,T=5778.):\n\n c1=1.191042E8\n c2=1.4387752E4\n L=c1/(wavelen**5*(np.exp(c2/(wavelen*T))-1))\n return L",
"def get_kt(temps, delta_gibbs_ts):\n # rate coefficient from Eyring equation\n return KB / H * temps * np.exp(-delta_gibbs_ts / RG / temps) # [1/s] if unimolecular",
"def test_thermal_relaxation_error_kraus(self):\n t1, t2, time, p1 = (1, 2, 1, 0.3)\n error = thermal_relaxation_error(t1, t2, time, p1)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n self.assertEqual(circ[0]['name'], 'kraus')\n self.assertEqual(circ[0]['qubits'], [0])",
"def generate_blackbody(temp, wmin=40., wmax=300.):\n c = 2.99792e14 # um/s\n Jy2W = 1e-26 # Convert Jy to W/m2/Hz\n dw = 0.005\n nw = int((wmax - wmin) / dw) + 1\n wstar = np.arange(nw) * dw + wmin\n fstar = np.pi * thermast.planck_function(wstar, temp) \\\n * wstar ** 2 / (Jy2W * c)\n return wstar, fstar",
"def get_brightnesstemperature(self, channel):\n K1 = {\n \"10\": 3040.136402, # Constant K1 [W m-2 um-1].\n \"11\": 2482.375199,\n \"12\": 1935.060183,\n \"13\": 866.468575,\n \"14\": 641.326517,\n }\n\n K2 = {\n \"10\": 1735.337945, # Constant K2 [K].\n \"11\": 1666.398761,\n \"12\": 1585.420044,\n \"13\": 1350.069147,\n \"14\": 1271.221673,\n }\n\n return K2[channel] / np.log((K1[channel] / self.get_radiance(channel)) + 1)",
"def cracking_rate_Ai2020(T_dim):\n k_cr = 3.9e-20\n Eac_cr = 0 # to be implemented\n arrhenius = np.exp(Eac_cr / pybamm.constants.R * (1 / T_dim - 1 / 298.15))\n return k_cr * arrhenius",
"def rkha_basis(p: float, tau: float, k: K) -> Callable[[X], object]:\n\n # TODO: Improve typing of this function. The nlsa.function_algebra module\n # does not know that we can pass arrays of points in X as function\n # arguments, which is what we do throughout this module for efficiency.\n w = rkha_weights(p, tau)\n lam = w(k)\n phi = fourier_basis(k)\n psi = fun.mul(lam, phi)\n return psi",
"def derive_Fitzpactrick09(wavelength, alpha, RV):\n alpha = float(alpha)\n RV = float(RV)\n \n # First we'll calculate k(lambda - V) = E(lambda - V) / E(B - V),\n # directly from equation 5\n k = (0.349 + 2.087*RV) * (1.0 / (1.0 + (wavelength / 0.507)**alpha)) - RV\n\n # We'll calculate Alam/Av from K + Rv\n Alam_Av = (k / RV) + 1. \n \n # Finally, to get A_lambda/Aks we need to divide Alam_Av by AKs_Av.\n # We'll assume central wavelength of 2.14 for Ks\n idx = np.where(abs(wavelength - 2.14) == min(abs(wavelength - 2.14)))\n\n A_AKs_at_wave = Alam_Av / Alam_Av[idx]\n\n return A_AKs_at_wave",
"def rodconan(r, L0, k):\n # k1 is the value of :\n # 2*gamma_R(11./6)*2^(-5./6)*pi^(-8./3)*(24*gamma_R(6./5)/5.)^(5./6);\n k1 = 0.1716613621245709486\n dprf0 = (2*numpy.pi/L0)*r\n\n if dprf0 > 4.71239:\n \tres = asymp_macdo(dprf0)\n else:\n \tres = -macdo_x56(dprf0, k)\n\n res *= k1 * L0**(5./3)\n\n return res"
]
| [
"0.7115234",
"0.6639137",
"0.65268266",
"0.61489564",
"0.60964906",
"0.60357994",
"0.5996236",
"0.5951445",
"0.59121585",
"0.5871175",
"0.570669",
"0.56897724",
"0.5669057",
"0.5641666",
"0.5613359",
"0.55661654",
"0.5559787",
"0.5544136",
"0.5537837",
"0.54915255",
"0.5489683",
"0.5480318",
"0.54244554",
"0.53727543",
"0.53722805",
"0.53703",
"0.5362979",
"0.5356486",
"0.53503156",
"0.5330943"
]
| 0.6880025 | 1 |
Testing that consecutive slices are forbidden. | def test_forbidden_consecutive_slices(
assert_errors,
parse_ast_tree,
expression,
default_options,
):
tree = parse_ast_tree(usage_template.format(expression))
visitor = SubscriptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConsecutiveSlicesViolation]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_forbidden_multiple_consecutive_slices(\n assert_errors,\n parse_ast_tree,\n expression,\n default_options,\n):\n tree = parse_ast_tree(usage_template.format(expression))\n\n visitor = SubscriptVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [\n ConsecutiveSlicesViolation,\n ConsecutiveSlicesViolation,\n ])",
"def _is_extended_slice(s):\n\n return s.step is not None and s.step != 1",
"def test_nonconsecutive_slices(\n assert_errors,\n parse_ast_tree,\n expression,\n default_options,\n):\n tree = parse_ast_tree(usage_template.format(expression))\n\n visitor = SubscriptVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [])",
"def test_negative_stop(self):\n self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])",
"def test_slice_delslice_forbidden(self):\n global setVal\n class foo:\n def __delslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __delitem__(self, index):\n global setVal\n setVal = index\n\n del foo()[::]\n self.assertEqual(setVal, slice(None, None, None))\n del foo()[::None]\n self.assertEqual(setVal, slice(None, None, None))",
"def testSliceStopOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[:-11] # pylint: disable=pointless-statement",
"def _checkForSlicesInKey(self, key):\n if isinstance(key, tuple):\n for i, v in enumerate(key):\n if isinstance(v, slice):\n raise PyTextCanvasException('Use parentheses when specifying slices, i.e. spam[(0, 0):(9, 9)] not spam[0, 0:9, 9].')",
"def test_bit_not_offset_out_of_range(self):\n ops = [bitwise_operations.bit_not(self.five_255_bin, 41, 8, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def testSliceBothOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[-12:-11] # pylint: disable=pointless-statement",
"def test_general_subset_invalid_space():\n pass",
"def forbid_consecutive_interval(s1, s2, interval):\n for (na,n1), (nb, n2) in util.pairwise(tools.iter_melodies(s1,s2,all=True)):\n nap, nbp, n1p, n2p = [util.to_pitch(x) for x in (na, nb, n1, n2)]\n if nap.isQualifiedInterval(interval).With(n1p) and nbp.isQualifiedInterval(interval).With(n2p):\n error.warn(f\"Two {interval} in a row are forbidden\",na,nb,n1,n2,f\"in {s1.title} and {s2.title}\")",
"def consecutive_sections(): # noqa: D416",
"def testSliceStopOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[:11] # pylint: disable=pointless-statement",
"def testSliceMiddleWithNegativeIndex(self):\n inst = WireData(b'0123456789')\n self.assertEqual(inst[-6:-3], WireData(b'456'))",
"def testSliceLowerHalfWithNegativeIndex(self):\n inst = WireData(b'0123456789')\n self.assertEqual(inst[:-5], WireData(b'01234'))",
"def illegal_parallel_intervals(a_list, b_list):\n allowed_parallel_intervals = ['3', '6']\n consecutives = parallel_motion(a_list, b_list)\n\n return [\n c for c in consecutives\n if c[0][0][0] not in allowed_parallel_intervals\n ]",
"def testSliceBothOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[10:20] # pylint: disable=pointless-statement",
"def test_slice_negative_index_error(self):\n self.assertRaises(IndexError, lambda: self.table[-1])",
"def test_slice_getslice_forbidden(self):\n class foo:\n def __getslice__(self, i, j):\n return 42\n def __getitem__(self, index):\n return 23\n\n self.assertEqual(foo()[::], 23)\n self.assertEqual(foo()[::None], 23)",
"def test_validate_begin_equals_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 5]])",
"def testSliceStartOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[11:] # pylint: disable=pointless-statement",
"def testSliceStartOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[-11:] # pylint: disable=pointless-statement",
"def illegal_consecutive_parallels(a_list, b_list):\n max_consecutive_parallel = 3\n consecutives = parallel_motion(a_list, b_list)\n\n return [c for c in consecutives if len(c) > max_consecutive_parallel]",
"def test_permutation_bad(self):\n self.assertRaises(CircuitError, Permutation, 4, [1, 0, -1, 2])",
"def test_amino_acid_slicing(self):\n pass",
"def test_amino_acid_slicing(self):\n pass",
"def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)",
"def _not_allowed_len(values, sieve):\n sieve = set(sieve)\n return any(len(i) not in sieve for i in values)",
"def test_timeseries_indexing():\n s = channel.Slice(channel.TimeSeries([14, 15, 16, 17], [4, 5, 6, 7]))\n\n np.testing.assert_equal(s[0:5].data, [14])\n np.testing.assert_equal(s[0:5].timestamps, [4])\n np.testing.assert_equal(s[4:5].data, [14])\n np.testing.assert_equal(s[4:5].timestamps, [4])\n np.testing.assert_equal(s[4:6].data, [14, 15])\n np.testing.assert_equal(s[4:6].timestamps, [4, 5])\n np.testing.assert_equal(s[4:10].data, [14, 15, 16, 17])\n np.testing.assert_equal(s[4:10].timestamps, [4, 5, 6, 7])\n\n with pytest.raises(IndexError) as exc:\n assert s[1]\n assert str(exc.value) == \"Scalar indexing is not supported, only slicing\"\n with pytest.raises(IndexError) as exc:\n assert s[1:2:3]\n assert str(exc.value) == \"Slice steps are not supported\"\n\n s = channel.Slice(channel.TimeSeries([], []))\n assert len(s[1:2].data) == 0\n assert len(s[1:2].timestamps) == 0",
"def test_validate_begin_greater_than_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 3]])"
]
| [
"0.6979152",
"0.6303644",
"0.62793154",
"0.6089854",
"0.59647316",
"0.5812619",
"0.5812093",
"0.5779929",
"0.57402104",
"0.5720322",
"0.5710838",
"0.56718296",
"0.5631256",
"0.5608668",
"0.5585874",
"0.5575349",
"0.5570698",
"0.5568004",
"0.5545934",
"0.553743",
"0.5519926",
"0.54670113",
"0.5454907",
"0.54251957",
"0.5399006",
"0.5399006",
"0.5389283",
"0.5377538",
"0.53526896",
"0.53488964"
]
| 0.7050572 | 0 |
Testing that consecutive slices are forbidden. | def test_forbidden_multiple_consecutive_slices(
assert_errors,
parse_ast_tree,
expression,
default_options,
):
tree = parse_ast_tree(usage_template.format(expression))
visitor = SubscriptVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [
ConsecutiveSlicesViolation,
ConsecutiveSlicesViolation,
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_forbidden_consecutive_slices(\n assert_errors,\n parse_ast_tree,\n expression,\n default_options,\n):\n tree = parse_ast_tree(usage_template.format(expression))\n\n visitor = SubscriptVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [ConsecutiveSlicesViolation])",
"def _is_extended_slice(s):\n\n return s.step is not None and s.step != 1",
"def test_nonconsecutive_slices(\n assert_errors,\n parse_ast_tree,\n expression,\n default_options,\n):\n tree = parse_ast_tree(usage_template.format(expression))\n\n visitor = SubscriptVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [])",
"def test_negative_stop(self):\n self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])",
"def test_slice_delslice_forbidden(self):\n global setVal\n class foo:\n def __delslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __delitem__(self, index):\n global setVal\n setVal = index\n\n del foo()[::]\n self.assertEqual(setVal, slice(None, None, None))\n del foo()[::None]\n self.assertEqual(setVal, slice(None, None, None))",
"def _checkForSlicesInKey(self, key):\n if isinstance(key, tuple):\n for i, v in enumerate(key):\n if isinstance(v, slice):\n raise PyTextCanvasException('Use parentheses when specifying slices, i.e. spam[(0, 0):(9, 9)] not spam[0, 0:9, 9].')",
"def testSliceStopOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[:-11] # pylint: disable=pointless-statement",
"def test_bit_not_offset_out_of_range(self):\n ops = [bitwise_operations.bit_not(self.five_255_bin, 41, 8, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def testSliceBothOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[-12:-11] # pylint: disable=pointless-statement",
"def test_general_subset_invalid_space():\n pass",
"def forbid_consecutive_interval(s1, s2, interval):\n for (na,n1), (nb, n2) in util.pairwise(tools.iter_melodies(s1,s2,all=True)):\n nap, nbp, n1p, n2p = [util.to_pitch(x) for x in (na, nb, n1, n2)]\n if nap.isQualifiedInterval(interval).With(n1p) and nbp.isQualifiedInterval(interval).With(n2p):\n error.warn(f\"Two {interval} in a row are forbidden\",na,nb,n1,n2,f\"in {s1.title} and {s2.title}\")",
"def consecutive_sections(): # noqa: D416",
"def testSliceStopOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[:11] # pylint: disable=pointless-statement",
"def testSliceMiddleWithNegativeIndex(self):\n inst = WireData(b'0123456789')\n self.assertEqual(inst[-6:-3], WireData(b'456'))",
"def testSliceLowerHalfWithNegativeIndex(self):\n inst = WireData(b'0123456789')\n self.assertEqual(inst[:-5], WireData(b'01234'))",
"def illegal_parallel_intervals(a_list, b_list):\n allowed_parallel_intervals = ['3', '6']\n consecutives = parallel_motion(a_list, b_list)\n\n return [\n c for c in consecutives\n if c[0][0][0] not in allowed_parallel_intervals\n ]",
"def testSliceBothOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[10:20] # pylint: disable=pointless-statement",
"def test_slice_negative_index_error(self):\n self.assertRaises(IndexError, lambda: self.table[-1])",
"def test_slice_getslice_forbidden(self):\n class foo:\n def __getslice__(self, i, j):\n return 42\n def __getitem__(self, index):\n return 23\n\n self.assertEqual(foo()[::], 23)\n self.assertEqual(foo()[::None], 23)",
"def test_validate_begin_equals_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 5]])",
"def testSliceStartOutOfUpperBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[11:] # pylint: disable=pointless-statement",
"def testSliceStartOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[-11:] # pylint: disable=pointless-statement",
"def illegal_consecutive_parallels(a_list, b_list):\n max_consecutive_parallel = 3\n consecutives = parallel_motion(a_list, b_list)\n\n return [c for c in consecutives if len(c) > max_consecutive_parallel]",
"def test_permutation_bad(self):\n self.assertRaises(CircuitError, Permutation, 4, [1, 0, -1, 2])",
"def test_amino_acid_slicing(self):\n pass",
"def test_amino_acid_slicing(self):\n pass",
"def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)",
"def _not_allowed_len(values, sieve):\n sieve = set(sieve)\n return any(len(i) not in sieve for i in values)",
"def test_timeseries_indexing():\n s = channel.Slice(channel.TimeSeries([14, 15, 16, 17], [4, 5, 6, 7]))\n\n np.testing.assert_equal(s[0:5].data, [14])\n np.testing.assert_equal(s[0:5].timestamps, [4])\n np.testing.assert_equal(s[4:5].data, [14])\n np.testing.assert_equal(s[4:5].timestamps, [4])\n np.testing.assert_equal(s[4:6].data, [14, 15])\n np.testing.assert_equal(s[4:6].timestamps, [4, 5])\n np.testing.assert_equal(s[4:10].data, [14, 15, 16, 17])\n np.testing.assert_equal(s[4:10].timestamps, [4, 5, 6, 7])\n\n with pytest.raises(IndexError) as exc:\n assert s[1]\n assert str(exc.value) == \"Scalar indexing is not supported, only slicing\"\n with pytest.raises(IndexError) as exc:\n assert s[1:2:3]\n assert str(exc.value) == \"Slice steps are not supported\"\n\n s = channel.Slice(channel.TimeSeries([], []))\n assert len(s[1:2].data) == 0\n assert len(s[1:2].timestamps) == 0",
"def test_validate_begin_greater_than_end():\n with pytest.raises(InvalidSegmentError):\n _validate([[1, 2], [5, 3]])"
]
| [
"0.705046",
"0.630489",
"0.6279199",
"0.6091267",
"0.59660774",
"0.58130056",
"0.5812792",
"0.57814914",
"0.57406366",
"0.5720761",
"0.5710128",
"0.5671244",
"0.563131",
"0.56088775",
"0.5586154",
"0.5573127",
"0.557137",
"0.5568639",
"0.554791",
"0.5537434",
"0.5520229",
"0.54674363",
"0.5453397",
"0.5424784",
"0.5400235",
"0.5400235",
"0.53891814",
"0.53771764",
"0.5353441",
"0.5348745"
]
| 0.69790363 | 1 |
finds the maximum product of four adjacent numbers in a matrix in the same direction | def find_max_product(mtx):
max_prod = 0
for row_num in range(20):
vert = 0
diag = 0
anti_diag = 0
horiz = horiz_max(mtx[row_num])
if row_num < len(mtx) - 3:
vert = vert_max(mtx[row_num], mtx[row_num + 1],
mtx[row_num + 2], mtx[row_num + 3])
diag = diag_max(mtx[row_num], mtx[row_num + 1],
mtx[row_num + 2], mtx[row_num + 3])
anti_diag = anti_diag_max(mtx[row_num], mtx[row_num + 1],
mtx[row_num + 2], mtx[row_num + 3])
max_prod = max(max_prod, horiz, vert, diag, anti_diag)
return max_prod | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_largest_column_product(grid):\n max_product = 0\n for column in range(len(grid)):\n for row in range(len(grid) - 3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row + j][column]\n if current_product > max_product:\n max_product = current_product\n return max_product",
"def compute_largest_diagonal2_product(grid):\n max_product = 0\n for row in range(len(grid) - 1 , 2 , -1):\n for column in range(len(grid) - 3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row - j][column + j]\n if current_product > max_product:\n max_product = current_product\n\n if current_product == 70600674:\n print(row , column)\n return max_product",
"def max_pairwise_product_brute_force(array):\n\n if len(array) <= 1:\n return 0\n\n max_product = 0\n\n for i in range(len(array)):\n for j in range(len(array)):\n if i != j:\n if array[i] * array[j] > max_product:\n max_product = array[i] * array[j]\n\n return max_product",
"def max_pairwise_product_linear(array):\n\n if len(array) <= 1:\n return 0\n\n two_biggest_values = [0, 0]\n\n for element in array:\n if element > two_biggest_values[0]:\n two_biggest_values[0] = element\n elif element > two_biggest_values[1]:\n two_biggest_values[1] = element\n\n return two_biggest_values[0] * two_biggest_values[1]",
"def compute_largest_diagonal1_product(grid):\n max_product = 0\n for row in range(len(grid) - 3):\n for column in range(len(grid) -3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row + j][column + j]\n if current_product > max_product:\n max_product = current_product\n return max_product",
"def highest_product(arr):\n\n product = 1\n\n for i in range(3):\n # find the max value in the list, get the index, pop it, and mulitply\n product *= arr.pop(arr.index(max(arr)))\n\n return product",
"def max_pairwise_product(numbers):\n n = len(numbers)\n max_product = 0\n for first in range(n):\n for second in range(first + 1, n):\n max_product = max(max_product,\n numbers[first] * numbers[second])\n\n return max_product",
"def get_max_sum4(a):\n return max(get_max_sum2(a), 0)",
"def maxProduct(data):\n maxval = float('-inf')\n for i in range(len(data)):\n for j in range(i+1, len(data)):\n if maxval < data[i]*data[j]:\n maxval = data[i]*data[j]\n a,b = (data[i],data[j])\n return tuple([a,b])",
"def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans",
"def compute_largest_product(grid):\n row_largest = compute_largest_row_product(grid)\n column_largest = compute_largest_column_product(grid)\n diagonal1_largest = compute_largest_diagonal1_product(grid)\n diagonal2_largest = compute_largest_diagonal2_product(grid)\n\n my_list = [row_largest , column_largest , diagonal1_largest , diagonal2_largest]\n return max(my_list)",
"def max_pairwise_product_fast(numbers):\n num_list = numbers.copy()\n max_num_1 = max(num_list)\n num_list.remove(max_num_1)\n max_num_2 = max(num_list)\n ans = max_num_1*max_num_2\n return ans",
"def maximumProduct1(self, nums: List[int]) -> int:\n s_nums = sorted(nums, reverse=True)\n return max(s_nums[0] * s_nums[1] * s_nums[2], s_nums[0] * s_nums[-1] * s_nums[-2])",
"def max_pairwise_product_sort(array):\n if len(array) <= 1:\n return 0\n\n array.sort()\n\n return array[-1] * array[-2]",
"def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]",
"def findMaxProduct(n):\n large = 0\n for i in range(len(s)):\n p = 1\n number = s[i:i+n]\n for iteration in range(len(number)):\n h = number[iteration]\n p = p * int(h)\n if p > large:\n large = p\n\n \n return large",
"def highest_product_3(arr):\n # sort in place (this will take O(n), at least)\n arr.sort()\n\n # get the maximum positive solution (this only works if all three > 0)\n max_product = arr[-1] * arr[-2] * arr[-3]\n\n # check for better solutions involving negatives\n # the only solution involving negatives will have exactly two of them\n # check the two options manually and return the largest one. \n if arr[0] < 0 and arr[1] < 0:\n if arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3]) > max_product:\n max_product = arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3])\n\n return max_product",
"def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)",
"def maxProduct2(nums):\n\n maxSubseq = nums[0]\n minSubseq = nums[0]\n res = nums[0]\n for i in range(1, len(nums)):\n if nums[i] < 0:\n minSubseq, maxSubseq = maxSubseq, minSubseq\n maxSubseq = max(nums[i], maxSubseq*nums[i])\n minSubseq = min(nums[i], minSubseq*nums[i])\n res = max(res, maxSubseq)\n return res",
"def MaxMatrix(m):\n max = 0\n index = [0,1]\n for i in m:\n for j in i:\n if j > max:\n max = j\n index = [m.index(i),i.index(j)]\n return index",
"def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)",
"def findMax2d(x):\n m, n = x.shape \n x_ = x.ravel()\n idx = np.argmax(x_)\n i = idx // n \n j = idx % n \n return i, j",
"def maxElem(A):\n n = len(A)\n AMax = 0.0\n for i in range(n):\n for j in range(i+1,n):\n if abs(A[i,j]) >= AMax:\n AMax = abs(A[i,j])\n k = i;l = j\n return AMax, k, l",
"def max_non_adjacent_sum_memoized(a):\n table = dict()\n def helper(a, table, i):\n if i in table:\n return table[i]\n if len(a) - i == 0:\n table[i] = 0\n elif len(a) - i == 1:\n table[i] = a[i]\n elif len(a) - i == 2:\n table[i] = max(a[0], a[1])\n else:\n table[i] = max(a[i] + helper(a, table, i + 2),\n a[i + 1] + helper(a, table, i + 3))\n return table[i]\n return helper(a, table, 0)",
"def largest_product(series, length):\n\tif length > len(series):\n\t\traise ValueError\n\tif length == 0 and len(series)==0:\n\t\treturn 1\n\treturn max((reduce(mul,s) for s in slices(series,length)))",
"def maxinrow(row,span=2):\n maximum= 0\n offset= span - 1\n for i in range(0,len(row)-offset,1):\n print row[i:i+span]\n ans= product(row[i:i+span])\n maximum = ans if ans > maximum else maximum\n return maximum",
"def max_non_adjacent_sum(a):\n if not a:\n return 0\n if len(a) == 1:\n return a[0]\n return max(a[0] + max_non_adjacent_sum(a[2:]),\n a[1] + max_non_adjacent_sum(a[3:]))",
"def largest_product(digits, size):\n # Why does a blank set of digits have a maximum product of 1?\n slice_list = slices(digits, size)\n def mult_reduce(items):\n total = 1\n for i in items:\n total *= i\n return total\n slice_list = [mult_reduce(l) for l in slice_list]\n return max(slice_list)",
"def find_max_sum(triangle):\n while len(triangle) > 1:\n _reduce_triangle(triangle)\n return triangle[0][0]",
"def four():\r\n \r\n i = 999\r\n j = i\r\n largest = 0\r\n \r\n while i > 0:\r\n while j > 0:\r\n number = str(i * j)\r\n forward = str(number)\r\n reverse = \"\"\r\n for char in number:\r\n reverse = char + reverse\r\n if forward == reverse:\r\n if largest < i * j:\r\n largest = i * j\r\n break\r\n else:\r\n j = j - 1\r\n i = i - 1\r\n j = i\r\n return largest"
]
| [
"0.7602335",
"0.7415646",
"0.7369275",
"0.7299083",
"0.72303224",
"0.70609385",
"0.700835",
"0.69688153",
"0.68755877",
"0.6805713",
"0.67672414",
"0.66905874",
"0.663058",
"0.66105884",
"0.6555576",
"0.65416676",
"0.6468184",
"0.64372873",
"0.6433118",
"0.6330626",
"0.6326945",
"0.6321754",
"0.6297931",
"0.62911844",
"0.6272916",
"0.6252824",
"0.623934",
"0.620068",
"0.6163659",
"0.6148804"
]
| 0.7494554 | 1 |
List out slot names based on the names of parameters of func | def _slots_from_params(func):
funcsig = signature(func)
slots = list(funcsig.parameters)
slots.remove('self')
return slots | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_slot_names(self, *args, **kwargs):\n return self._optimizer.get_slot_names(*args, **kwargs)",
"def function_name(parameters):",
"def parameter_names(self) -> List[str]:",
"def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []",
"def _func_list(name, size):\n return [hl.Func(\"%s_%d\" % (name, i)) for i in range(size)]",
"def _func_list(name, size):\n return [hl.Func(\"%s_%d\" % (name, i)) for i in range(size)]",
"def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))",
"def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names",
"def func_var_names(func):\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n return names",
"def return_parameter_names():\n return list(titles), list(labels)",
"def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]",
"def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)",
"def getGroupFuncs(self):\n\n funcs = []\n for p in self.Parameters:\n if p.arg_name[0:8] == \"Function\" and p.arg_value:\n fct, attr = p.arg_value.split(':')\n if fct and attr:\n funcs.append((fct, attr))\n if not funcs:\n funcs.append(('count', '*'))\n return funcs",
"def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params",
"def load_params(self, func, index):\n inputs = inspect.getfullargspec(func).args\n params = []\n for iname in inputs:\n params.append(self.load(f\"{func.__name__}.{iname}_{index}\"))\n return params",
"def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()",
"def seperate_symbols(func):\n params = []\n vars = []\n for symbol in func.free_symbols:\n if not str(symbol).isidentifier():\n continue # E.g. Indexed objects might print to A[i, j]\n if isinstance(symbol, Parameter):\n params.append(symbol)\n elif isinstance(symbol, Idx):\n # Idx objects are not seen as parameters or vars.\n pass\n elif isinstance(symbol, (MatrixExpr, Expr)):\n vars.append(symbol)\n else:\n raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol)))\n\n for der in func.atoms(sympy.Derivative):\n # Used by jacobians and hessians, where derivatives are treated as\n # Variables. This way of writing it is purposefully discriminatory\n # against derivatives wrt variables, since such derivatives should be\n # performed explicitly in the case of jacs/hess, and are treated\n # differently in the case of ODEModels.\n if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables):\n vars.append(der)\n\n params.sort(key=lambda symbol: symbol.name)\n vars.sort(key=lambda symbol: symbol.name)\n return vars, params",
"def _GetSlots(mcs, attrs):\n raise NotImplementedError",
"def fetch_required_param_name(cmd):\n try:\n case_no = current_config.COMMAND_DICT[cmd]\n if case_no == 1:\n \"\"\"Create_parking_lot\"\"\"\n return [\"slots\"]\n elif case_no == 2:\n \"\"\"Park\"\"\"\n return [\"reg_no\", \"age\"]\n elif case_no == 3:\n \"\"\" Leave \"\"\"\n return [\"slot_no\"]\n elif case_no == 4:\n \"\"\"Vehicle_registration_number_for_driver_of_age\"\"\"\n return [\"age\"]\n elif case_no == 5:\n \"\"\"Slot_numbers_for_driver_of_age\"\"\"\n return [\"age\"]\n elif case_no == 6:\n \"\"\"Slot_number_for_car_with_number\"\"\"\n return [\"reg_no\"]\n except KeyError:\n return\n # logger.debug(\"Something wrong with input\")\n # raise InvalidParams(\"Something wrong with input\")",
"def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}",
"def getVisitableNodesNamed(self):\n\n return ((\"locals_arg\", self.subnode_locals_arg),)",
"def name_func(func, num, params):\n return \"%s_%s_%s\" % (\n func.__name__, int(num),\n parameterized.to_safe_name('_'.join((params.args[0].__name__, params.args[1].__name__)))\n )",
"def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", \".join(names)",
"def get_func_tuples():\n func_tuples = [\n ('met_gumeJ1_3sopt_tr20', 'Rel-UME J1', 'C1-.'),\n ('met_gumeJ5_3sopt_tr20', 'Rel-UME J5', 'r-^'),\n ('met_gfssdJ1_3sopt_tr20', 'Rel-FSSD J1', 'C4--'),\n ('met_gfssdJ5_3sopt_tr20', 'Rel-FSSD J5', 'b-x'),\n\n ('met_gmmd_med', 'Rel-MMD', 'k-.'),\n ('met_gmmd_med_bounliphone', 'Rel-MMD medboun', 'k-'),\n\n ('met_gfssdJ1_3sopt_tr50', 'FSSD-opt3 J1', 'b-^'),\n ('met_gfssdJ5_3sopt_tr50', 'FSSD-opt3 J5', 'b-.h'),\n\n ('met_gumeJ1_2V_rand', 'UME-rand J1', 'r--^'),\n ('met_gumeJ1_1V_rand', 'UME-rand J1 1V', 'y-'),\n ('met_gumeJ2_2V_rand', 'UME-rand J2', 'g--^'),\n ('met_gumeJ3_2V_rand', 'UME-rand J3', 'b--^'),\n ('met_gumeJ5_2V_rand', 'UME-rand J5', 'k--^'),\n\n ('met_gumeJ1_2sopt_tr20', 'Rel-UME-opt2 J1', 'C2-.'),\n ('met_gumeJ5_2sopt_tr20', 'Rel-UME-opt2 J5', 'g-'),\n ('met_gumeJ1_2sopt_tr50', 'Rel-UME-opt2 J1', 'r-.h'),\n\n ('met_gumeJ1_3sopt_tr50', 'UME-opt3 J1', 'r-'),\n ('met_gumeJ5_3sopt_tr50', 'UME-opt3 J5', 'k-'),\n\n\n ]\n return func_tuples",
"def get_func_names(job_content):\n func_names = []\n for op in job_content[\"op_list\"]:\n if \"func_name\" in op:\n func_names.append(op[\"func_name\"])\n return func_names",
"def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result"
]
| [
"0.7401886",
"0.6727158",
"0.63846385",
"0.6307123",
"0.6185898",
"0.6185898",
"0.61344635",
"0.6027699",
"0.6006341",
"0.5964612",
"0.59010446",
"0.5862502",
"0.57894915",
"0.5722084",
"0.5717884",
"0.5717884",
"0.5704354",
"0.5668376",
"0.566833",
"0.5629399",
"0.5614547",
"0.55924857",
"0.5566222",
"0.54397136",
"0.54336834",
"0.54330987",
"0.5429191",
"0.5414575",
"0.5411227",
"0.54064685"
]
| 0.71931607 | 1 |
Open or create a wheel file. In write and exclusive-write modes, if `file_or_path` is not specified, or the specified path is a directory, the wheel file will be created in the current working directory, with a filename generated using the values given via the `distname`, `version`, `build_tag`, `language_tag`, `abi_tag`, and `platform_tag` arguments. Each of these parameters is stored in a read-only property of the same name. | def __init__(
self,
file_or_path: Union[str, Path, BinaryIO] = './',
mode: str = 'r',
*,
distname: Optional[str] = None,
version: Optional[Union[str, Version]] = None,
build_tag: Optional[Union[int, str]] = None,
language_tag: Optional[str] = None,
abi_tag: Optional[str] = None,
platform_tag: Optional[str] = None
) -> None:
assert not isinstance(file_or_path, io.TextIOBase), (
"Text buffer given where a binary one was expected."
)
if 'a' in mode:
# Requires rewrite feature
raise NotImplementedError(
"Append mode is not supported yet"
)
if 'l' in mode:
raise NotImplementedError(
"Lazy modes are not supported yet"
)
self.mode = mode
# These might be None in case a corrupted wheel is read in lazy mode
self.wheeldata: Optional[WheelData] = None
self.metadata: Optional[MetaData] = None
self.record: Optional[WheelRecord] = None
if isinstance(file_or_path, str):
file_or_path = Path(file_or_path)
# TODO if value error, set build_tag to degenerated version, that
# compares with Version in a way that makes Version the higher one.
build_tag = int(build_tag) if build_tag is not None else None
if self._is_unnamed_or_directory(file_or_path):
self._require_distname_and_version(distname, version)
filename = self._get_filename(file_or_path)
self._pick_a_distname(filename, given_distname=distname)
self._pick_a_version(filename, given_version=version)
self._pick_tags(
filename, build_tag, language_tag, abi_tag, platform_tag
)
if self._is_unnamed_or_directory(file_or_path):
assert distname is not None and version is not None # For Mypy
self._generated_filename = self._generate_filename(
self._distname, self._version, self._build_tag,
self._language_tag, self._abi_tag, self._platform_tag
)
else:
self._generated_filename = ''
if isinstance(file_or_path, Path):
file_or_path /= self._generated_filename
# FIXME: the file is opened before validating the arguments, so this
# litters empty and corrupted wheels if any arg is wrong.
self._zip = ZipFile(file_or_path, mode)
# Used by _distinfo_path
self._distinfo_prefix: Optional[str] = None
if 'w' in mode or 'x' in mode:
self._initialize_distinfo()
else:
self._distinfo_prefix = self._find_distinfo_prefix()
self._read_distinfo()
if 'l' not in mode:
self.validate() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_wheel( # noqa:C901\n req=None, # type: Optional[TInstallRequirement]\n reqset=None, # type: Optional[Union[TReqSet, Iterable[TInstallRequirement]]]\n output_dir=None, # type: Optional[str]\n preparer=None, # type: Optional[TPreparer]\n wheel_cache=None, # type: Optional[TWheelCache]\n build_options=None, # type: Optional[List[str]]\n global_options=None, # type: Optional[List[str]]\n check_binary_allowed=None, # type: Optional[Callable[TInstallRequirement, bool]]\n no_clean=False, # type: bool\n session=None, # type: Optional[TSession]\n finder=None, # type: Optional[TFinder]\n install_command=None, # type: Optional[TCommand]\n req_tracker=None, # type: Optional[TReqTracker]\n build_dir=None, # type: Optional[str]\n src_dir=None, # type: Optional[str]\n download_dir=None, # type: Optional[str]\n wheel_download_dir=None, # type: Optional[str]\n cache_dir=None, # type: Optional[str]\n use_user_site=False, # type: bool\n use_pep517=None, # type: Optional[bool]\n verify=False, # type: bool\n editable=False, # type: bool\n format_control_provider=None, # type: Optional[TShimmedFunc]\n wheel_cache_provider=None, # type: Optional[TShimmedFunc]\n preparer_provider=None, # type: Optional[TShimmedFunc]\n wheel_builder_provider=None, # type: Optional[TShimmedFunc]\n build_one_provider=None, # type: Optional[TShimmedFunc]\n build_one_inside_env_provider=None, # type: Optional[TShimmedFunc]\n build_many_provider=None, # type: Optional[TShimmedFunc]\n install_command_provider=None, # type: Optional[TShimmedFunc]\n finder_provider=None, # type: Optional[TShimmedFunc]\n reqset_provider=None, # type: Optional[TShimmedFunc]\n):\n # type: (...) -> Generator[Union[str, Tuple[List[TInstallRequirement], ...]], None, None]\n wheel_cache_provider = resolve_possible_shim(wheel_cache_provider)\n preparer_provider = resolve_possible_shim(preparer_provider)\n wheel_builder_provider = resolve_possible_shim(wheel_builder_provider)\n build_one_provider = resolve_possible_shim(build_one_provider)\n build_one_inside_env_provider = resolve_possible_shim(build_one_inside_env_provider)\n build_many_provider = resolve_possible_shim(build_many_provider)\n install_cmd_provider = resolve_possible_shim(install_command_provider)\n format_control_provider = resolve_possible_shim(format_control_provider)\n finder_provider = resolve_possible_shim(finder_provider) or get_package_finder\n reqset_provider = resolve_possible_shim(reqset_provider)\n global_options = [] if global_options is None else global_options\n build_options = [] if build_options is None else build_options\n options = None\n kwarg_map = {\n \"cache_dir\": cache_dir,\n \"src_dir\": src_dir,\n \"download_dir\": download_dir,\n \"wheel_download_dir\": wheel_download_dir,\n \"build_dir\": build_dir,\n \"use_user_site\": use_user_site,\n }\n if not req and not reqset:\n raise TypeError(\"Must provide either a requirement or requirement set to build\")\n with contextlib.ExitStack() as ctx:\n kwargs = kwarg_map.copy()\n if wheel_cache is None and (reqset is not None or output_dir is None):\n if install_command is None:\n assert isinstance(install_cmd_provider, (type, functools.partial))\n install_command = install_cmd_provider()\n kwargs, options = populate_options(install_command, options, **kwarg_map)\n format_control = getattr(options, \"format_control\", None)\n if not format_control:\n format_control = format_control_provider(None, None) # type: ignore\n wheel_cache = ctx.enter_context(\n wheel_cache_provider(options.cache_dir, format_control)\n )\n if req 
and not reqset and not output_dir:\n output_dir = get_ireq_output_path(wheel_cache, req)\n if not reqset and build_one_provider:\n build_one_kwargs = {\n \"req\": req,\n \"output_dir\": output_dir,\n \"verify\": verify,\n \"editable\": editable,\n \"build_options\": build_options,\n \"global_options\": global_options,\n }\n yield call_function_with_correct_args(build_one_provider, **build_one_kwargs)\n elif build_many_provider:\n yield build_many_provider(\n reqset, wheel_cache, build_options, global_options, check_binary_allowed\n )\n else:\n builder_args, builder_kwargs = get_allowed_args(wheel_builder_provider)\n if \"requirement_set\" in builder_args and not reqset:\n reqset = reqset_provider()\n if session is None and finder is None:\n session = get_session(install_cmd=install_command, options=options)\n finder = finder_provider(\n install_command, options=options, session=session\n )\n if preparer is None:\n preparer_kwargs = {\n \"build_dir\": kwargs[\"build_dir\"],\n \"src_dir\": kwargs[\"src_dir\"],\n \"download_dir\": kwargs[\"download_dir\"],\n \"wheel_download_dir\": kwargs[\"wheel_download_dir\"],\n \"finder\": finder,\n \"session\": session\n if session\n else get_session(install_cmd=install_command, options=options),\n \"install_cmd\": install_command,\n \"options\": options,\n \"use_user_site\": use_user_site,\n \"req_tracker\": req_tracker,\n }\n preparer = ctx.enter_context(preparer_provider(**preparer_kwargs))\n check_bin = check_binary_allowed if check_binary_allowed else lambda x: True\n builder_kwargs = {\n \"requirement_set\": reqset,\n \"finder\": finder,\n \"preparer\": preparer,\n \"wheel_cache\": wheel_cache,\n \"no_clean\": no_clean,\n \"build_options\": build_options,\n \"global_options\": global_options,\n \"check_binary_allowed\": check_bin,\n }\n builder = call_function_with_correct_args(\n wheel_builder_provider, **builder_kwargs\n )\n if req and not reqset:\n if not output_dir:\n output_dir = get_ireq_output_path(wheel_cache, req)\n if use_pep517 is not None:\n req.use_pep517 = use_pep517\n yield builder._build_one(req, output_dir)\n else:\n yield builder.build(reqset)",
"def create_file(cls, relpath, contents='', mode='w'):\r\n with safe_open(os.path.join(cls.build_root, relpath), mode=mode) as fp:\r\n fp.write(contents)",
"def WriteFile(path, content, mode='w', atomic=False, makedirs=False):\n write_path = path\n if atomic:\n write_path = path + '.tmp'\n\n if makedirs:\n SafeMakedirs(os.path.dirname(path))\n\n with open(write_path, mode) as f:\n f.writelines(cros_build_lib.iflatten_instance(content))\n\n if not atomic:\n return\n\n try:\n os.rename(write_path, path)\n except EnvironmentError:\n SafeUnlink(write_path)\n raise",
"def atomic_write_in_dir(path, **kwargs):\n writer = AtomicWriter(path, **kwargs)\n return writer._open(_get_fileobject_func(writer, os.path.dirname(path)))",
"def create_file(path: Path, content: str) -> None:\n path.touch()\n with path.open(\"w\") as f:\n f.write(content)",
"def perform_register(path, file_name):\n subprocess.call(\n [sys.executable, 'setup.py', 'sdist', 'bdist_wheel'], cwd=path)\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '.tar.gz')])\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '-py3-none-any.whl')])",
"def open_writable_zipfile(path):\n try:\n return ZipFile(path, 'w', ZIP_DEFLATED)\n except RuntimeError: # pragma: nocover\n # zlib module not available\n return ZipFile(path, 'w')",
"def makeFile(self, path=None, content=b''):\n if path is None:\n path = self.mktemp()\n with open(path, 'wb') as file:\n file.write(content)\n return path",
"def create_file(path):\n open(path, \"w\").close()",
"def write_file(rel_path, text, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, 'w+', *args, **kwargs) as _file:\n _file.write(text)",
"def setup(cls, path, data_file, **kwargs):\n data_filepath = os.path.join(path, data_file)\n if not os.path.isfile(data_filepath):\n with open(data_filepath, 'w'):\n pass",
"def get_built_wheel_path(link):\n sdist_path = download_file(\n link.url, filename=link.filename, check=link.check_download,\n )\n container = os.path.dirname(sdist_path)\n\n unpacked_dir = tempfile.mkdtemp()\n atexit.register(shutil.rmtree, unpacked_dir, ignore_errors=True)\n unpack_file(sdist_path, unpacked_dir, None, PipLink(link))\n\n wheel_content_dir = tempfile.mkdtemp()\n atexit.register(shutil.rmtree, wheel_content_dir, ignore_errors=True)\n proc = subprocess.Popen(\n [sys.executable, 'setup.py', 'bdist_wheel', '-d', container],\n cwd=unpacked_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n warnings.warn('failed to build wheel\\n{}'.format(stderr))\n return None\n for fn in os.listdir(container):\n if fn != os.path.basename(sdist_path):\n return os.path.join(container, fn)\n warnings.warn('failed to find built wheel')\n return None",
"def dist(c, wheel=False):\n commands = \"sdist\" if not wheel else \"sdist bdist_wheel\"\n c.run(f\"python {SETUP_FILE} {commands}\")",
"def make_test_file(path: Path, name: str = None):\n if not name:\n name = \"test.txt\"\n\n path.mkdir(parents=True, exist_ok=True)\n test_file = path.joinpath(name)\n test_file.touch()\n mtime = (datetime.today() - timedelta(2)).timestamp()\n os.utime(test_file, (mtime, mtime))\n return test_file",
"def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))",
"def wheel(string = None):\n None",
"def create_file(path):\n command = ['touch', TEST_FILE]\n file_operation(path, command)",
"def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)",
"def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):\n info = make_wheel_in(pyproj_toml, Path(wheel_directory))\n return info.file.name",
"def __init__(self, path, *args, mode=\"r\", config=None, **kwargs):\n\n self.path = os.path.abspath(path)\n if mode == \"r\":\n if not os.path.exists(self.path):\n raise FileNotFoundError(\n \"Couldn't open directory package at %s\"% (path,))\n self._index_cache = None\n self.writable = False\n elif mode == \"w\":\n if not os.path.isdir(self.path):\n os.makedirs(self.path)\n self._index_cache = None\n self.writable = True",
"def install(text, path=None, name=None, on_path=False, env=None):\n if path is not None:\n tmp_paths.append(path)\n file_ = open(path, 'w')\n elif name is not None:\n directory = tempfile.mkdtemp()\n tmp_paths.append(directory)\n path = os.path.join(directory, name)\n file_ = open(path, 'w')\n else:\n handle, path = tempfile.mkstemp()\n tmp_paths.append(path)\n file_ = os.fdopen(handle, 'w')\n\n file_.write(HEAD % dict(\n executable=sys.executable,\n pythonpath=sys.path,\n ))\n\n file_.write(text)\n\n file_.close()\n os.chmod(path, 0754)\n\n if on_path:\n original_environ.setdefault('PATH', os.environ['PATH'])\n os.environ['PATH'] = ':'.join((directory, os.environ['PATH']))\n\n if env is not None:\n original_environ.setdefault(env, os.environ.get(env))\n os.environ[env] = path\n\n return path",
"def create_file(dir, path, contents):\n\n fullpath = os.path.join(dir, path)\n fulldir = os.path.dirname(fullpath)\n\n if fulldir:\n try:\n os.makedirs(fulldir)\n except OSError:\n pass\n\n with open(fullpath, 'w') as file:\n file.write(contents)",
"def safe_open_w(path):\n mkdir_p(os.path.dirname(path))\n return open(path, 'wb')",
"def put_file(self, path, contents):\n data = io.BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tarfile_:\n file_contents = contents.encode() if isinstance(contents, str) else contents\n tarinfo = tarfile.TarInfo(path)\n\n # We set the modification time to now because some systems (e.g. logging) rely upon\n # timestamps to determine whether to read config files.\n tarinfo.mtime = time.time()\n tarinfo.size = len(file_contents)\n tarfile_.addfile(tarinfo, io.BytesIO(file_contents))\n data.seek(0)\n\n self.container.put_archive(path='/', data=data)",
"def repack_wheel(data: bytes):\n new_data = BytesIO()\n with ZipFile(BytesIO(data)) as existing_zip:\n with ZipFile(new_data, mode=\"w\") as new_zip:\n for zipinfo in existing_zip.infolist():\n if re.search(r\"pip-.+\\.dist-info/\", zipinfo.filename):\n continue\n new_zip.writestr(zipinfo, existing_zip.read(zipinfo))\n\n return new_data.getvalue()",
"def mkfile(self, _path, contents=None, overwrite=False):\n if path.isfile(_path) and not overwrite:\n self.die('Cannot make file \"{0}\". Already exists and overwrite={1}'.format(_path, repr(overwrite)))\n \n # Make sure the directory exists\n self.mkpath(_path)\n \n # Make the file\n fh = open(_path, 'w')\n \n # If writing contents\n if contents:\n fh.write(contents)\n \n # Close the file\n fh.close()\n \n # Return the path\n return _path",
"def create_target(cls, relpath, target):\r\n cls.create_file(cls.build_path(relpath), target, mode='a')",
"def write_file(path: str, content: Union[str, bytes], mode: str = 'w') -> None:\n from peltak.core import context, log\n\n if context.get('pretend', False):\n log.info(\"Would overwrite <34>{path}<32> with:\\n<90>{content}\",\n path=path,\n content=content)\n else:\n with open(path, mode) as fp:\n fp.write(content)",
"def _create_file(path, disk_subformat=\"rockridge\", files=None, **kwargs):\n if not files:\n raise RuntimeError(\"Unable to create an empty ISO file\")\n # We can use mkisofs, genisoimage, or xorriso, and fortunately\n # all three take similar parameters\n args = ['-output', path, '-full-iso9660-filenames',\n '-iso-level', '2', '-allow-lowercase']\n if disk_subformat == 'rockridge':\n args.append('-r')\n args += files\n helper = helper_select(['mkisofs', 'genisoimage', 'xorriso'])\n if helper.name == \"xorriso\":\n args = ['-as', 'mkisofs'] + args\n helper.call(args)",
"def create_file(self, value=None):\n if not path.isdir(\"Project\"):\n system(\"mkdir Project\")\n string_to_systemize = \"echo \\\"#!/usr/bin/python\\n\" + \\\n \"# Please use fp = open(\\'Project/yourfile.*\\') \" + \\\n \"when opening YOUR files\\n\" + \\\n \"# to not lose YOUR file in the jumble of OTHER files.\\n\" + \\\n \"# Also, do NOT delete the very first comment line.\\n\" + \\\n \"# \\'logs.txt\\' is your friend for your error logs.\\\"\" + \\\n \"> Project/myfile.py\"\n system(string_to_systemize)\n system(\"chmod +x Project/myfile.py\")\n self.open_file()"
]
| [
"0.5617606",
"0.55081505",
"0.5217721",
"0.5208986",
"0.5185103",
"0.5107417",
"0.50721467",
"0.5066343",
"0.4974916",
"0.49145496",
"0.49080288",
"0.4870449",
"0.48542884",
"0.4836735",
"0.4834694",
"0.4825824",
"0.48071554",
"0.47798565",
"0.47733852",
"0.47702336",
"0.4765205",
"0.47374624",
"0.47257882",
"0.47184515",
"0.47041327",
"0.46606803",
"0.46299818",
"0.4607599",
"0.459274",
"0.45823523"
]
| 0.6301335 | 0 |
Return a filename from a file object or a path. If given a file, the assumption is that the filename is within the value of its `name` attribute. If given a `Path`, assumes it is a path to an actual file, not a directory. If given an unnamed object, this returns None. | def _get_filename(
cls, file_or_path: Union[BinaryIO, Path]
) -> Optional[str]:
if cls._is_unnamed_or_directory(file_or_path):
return None
# TODO: test this
# If a file object is given, ensure it's a filename, not a path
if isinstance(file_or_path, Path):
return file_or_path.name
else:
# File objects contain the full path in their name attribute
filename = Path(file_or_path.name).name
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getabsfile(object, _filename=None):\r\n if _filename is None:\r\n _filename = getsourcefile(object) or getfile(object)\r\n return os.path.normcase(os.path.abspath(_filename))",
"def extract_filename_from_object_info(object_info: dict) -> Optional[str]:\n if \"name\" in object_info and object_info[\"name\"]:\n return object_info[\"name\"]\n\n for access_method in object_info[\"access_methods\"]:\n url = access_method[\"access_url\"][\"url\"]\n parts = url.split(\"/\")\n if parts:\n return parts[-1]\n\n return None",
"def _retrieve_filename(file: Union[str, FileStorage]) -> str:\n if isinstance(file, FileStorage):\n return file.filename\n return file",
"def file2path (x,name):\n if isinstance(x,file):\n x.close()\n return x.name\n if isinstance(x,str):\n return x\n raise ValueError(name,x)",
"def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file",
"def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)",
"def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)",
"def get_filename(self):\n \n for f in os.listdir(self.get_directory()):\n if os.path.isfile(os.path.join(self.get_directory(), f)):\n return f\n \n return None",
"def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))",
"def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile",
"def get_name(name, file: str) -> str:\n return os.path.basename(file) if name == \"__main__\" else name",
"def get_obj_path_name(uobject: unrealsdk.UObject) -> str:\n if uobject:\n return uobject.PathName(uobject)\n else:\n return \"None\"",
"def filename(self):\n if hasattr(self._file, 'name'):\n return self._file.name\n\n raise DescriptorOperationError(\n 'This property is not available on the underlying file-object: %r' % self._file)",
"def filename(self) -> Optional[str]:\n ...",
"def get_filename(self, latex_doc=None):\n if not self.filename:\n if latex_doc is None:\n filename = 'file'\n else:\n filename = latex_doc.filename\n else:\n filename = self.filename\n return self.fix_filename_extension(filename)",
"def GetFileName():\r\n d = GetData()\r\n return d.filename",
"def get_filename(self, file_object):\n\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n\n result = \"<show> <season>x<episode> <name>.mp4\"\n result = result.replace(\"<show>\", file_object.show.name)\n result = result.replace(\"<season>\", \"%.2d\" % \\\n int(file_object.season.number))\n result = result.replace(\"<episode>\", \"%s\" % \\\n str(file_object.number))\n result = result.replace(\"<name>\", file_object.name)\n return result",
"def get_file_name(file):\n return os.path.splitext(os.path.basename(file))[0]",
"def getObjFileName(self, metaObj, absoluteName=True, addSuffix=False):\n \n # absoulte or relative path\n if absoluteName:\n pathList = [self.rootDirName, self.baseDirName]\n else:\n pathList = []\n \n if metaObj.container is None:\n # Root package only\n pathList.append(ImpConstants.modellingPackageName)\n if self.codeDirName:\n pathList.append(self.codeDirName)\n pathList.append(self.rootFileName)\n \n else:\n # any other object\n ll = metaObj.qualifiedName().split('.')\n if self.codeDirName:\n ll[1:1] = [self.codeDirName]\n pathList.extend(ll)\n \n # special handling for ModelElements that may correspond to directories:\n if (isinstance(metaObj, MetaModel.MetaPackage) \n and (metaObj.containedPackages or not self.classesInPackage)):\n pathList.append(self.packageFile)\n \n elif not isinstance(metaObj.container, MetaModel.MetaPackage):\n raise MemopsError(\" file names not implemented for objects of type %s\"\n % (metaObj.__class__.__name__,))\n \n # add suffix\n if addSuffix and self.fileSuffix:\n pathList[-1] = '%s.%s' % (pathList[-1], self.fileSuffix)\n \n # \n return uniIo.joinPath(*pathList)",
"def get_file_name(path):\n return os.path.basename(path)",
"def file_name(path):\n return os.path.basename(path).split('.')[0]",
"def GetFileName(self):\n return self.file.GetPath()",
"def _get_file_name(id):\n client = Client(DRS_URL)\n c = client.client\n\n # assume id will be NA18537\n response = c.GetDataObject(data_object_id=id).result()\n return response['data_object'][\"name\"]",
"def filename(self):\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)",
"def get_file_object(file_name, path):\n os.chdir(path)\n info = os.stat(file_name)\n\n time_format = \"%a %b %d %H:%M:%S %Y\"\n file_mod_date = time.ctime(info.st_mtime)\n file_mod_date = datetime.strptime(file_mod_date, time_format)\n\n file_size = str(info.st_size)\n\n file_type = \"folder\" if os.path.isdir(f\"{path}/{file_name}\") else \"file\"\n\n name, path, size, ftype, mod_date = file_name, path, file_size, file_type, file_mod_date\n\n file = File(name, path, size, ftype, mod_date)\n\n return file",
"def filePathToFileName(path):\n return os.path.splitext(os.path.basename(path))[0]",
"def get_name_from_file(filename):\n return filename.split(\".\")[0]",
"def get_filename(filepath):\n return os.path.basename(filepath)",
"def _getFileName(self, filePath):\r\n\t\thead, tail = ntpath.split(filePath)\r\n\t\treturn tail or ntpath.basename(head)",
"def get_file_name(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[0]"
]
| [
"0.6543205",
"0.65163434",
"0.64702183",
"0.6166343",
"0.6082145",
"0.60737973",
"0.60737973",
"0.6067047",
"0.6064387",
"0.6030024",
"0.6028483",
"0.60068804",
"0.5997551",
"0.59268546",
"0.5924116",
"0.59208626",
"0.5917195",
"0.59133685",
"0.58956224",
"0.58722764",
"0.5863145",
"0.5849996",
"0.58464336",
"0.58387125",
"0.5832899",
"0.583194",
"0.5825071",
"0.5818888",
"0.5789165",
"0.5779801"
]
| 0.7912477 | 0 |
Write a file to the .data directory under a specified section. This method is a handy shortcut for writing into `.data/`, such that you don't have to generate the path yourself. Updates the wheel record, if the record is being kept. | def write_data(self, filename: Union[str, Path],
section: str, arcname: Optional[str] = None,
*, recursive: bool = True, resolve: bool = True) -> None:
self._check_section(section)
if isinstance(filename, str):
filename = Path(filename)
if arcname is None:
arcname = filename.name
arcname = self._distinfo_path(section + '/' + arcname.lstrip('/'),
kind='data')
self.write(filename, arcname, recursive=recursive, resolve=resolve) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def setSection(self, section, item, data):\n if not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, item, data)\n # Write the updated file whenever anything changes\n with open(self.filename, 'w') as configfile:\n self.config.write(configfile)",
"def write(self, data_pref)\n\n def _writeToAddama(self, addama_dir):",
"def writestr_data(self, section: str,\n zinfo_or_arcname: Union[ZipInfo, str],\n data: Union[bytes, str]) -> None:\n self._check_section(section)\n\n arcname = (\n zinfo_or_arcname.filename\n if isinstance(zinfo_or_arcname, ZipInfo)\n else zinfo_or_arcname\n )\n\n arcname = self._distinfo_path(section + '/' + arcname.lstrip('/'),\n kind='data')\n\n self.writestr(arcname, data)",
"def write_config(self, fname, data):\n with open(fname, 'w') as fhandle:\n fhandle.write(data)",
"def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )",
"def write_section(self, fhandle, sect):\n fhandle.write(\"[%s]\\n\" % sect)\n for opt in sorted(self.file_parser.options(sect)):\n fhandle.write('{0} = {1}\\n'.format(opt, self.file_parser.get(sect, opt)))",
"def set_data(self, section, option_name=None, option=None):\n #Check if file exists to prevent exceptions trying to reach it.\n if self.file[\"exists\"]:\n log.debug(\"File exists, setting up ConfigParser.\")\n config = ConfigParser()\n config.read(self.file[\"file_name\"])\n\n #Check if the section bot exists and add it if not.\n if not config.has_section(section):\n log.debug(\"Section not found, it will be added.\")\n config.add_section(section)\n \n #If a option is specified, it will add it to the section.\n if option_name != None:\n log.debug(\"Option specified, it will be added.\")\n config.set(section, option_name, option)\n\n #Write the config file.\n log.debug(\"Writing to settings file.\")\n with open(self.file[\"file_name\"], 'w') as f:\n config.write(f)\n \n log.info(\"Data added to '\" + self.file[\"file_name\"] + \"' file.\")\n else:\n log.error(\"Can't set token because the settings file is missing. Last error: \" +\n self.file[\"error\"])",
"def overwrite(section: str, data: any) -> None:\n\toverwriteDict[section] = data\n\tlogger.debug(f'Overwritten config {section}!')",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)",
"def WriteData(self, name, data):\n tempname = os.path.join(self._output_dir, '_%s' % name)\n handle = open(tempname, 'w')\n self.WriteFile(handle, data)\n handle.close()\n os.rename(tempname,\n os.path.join(self._output_dir, name))",
"def write_data():",
"def exposed_write_data(self, chunk_id, data):\n local_filename = self.chunk_filename(chunk_id)\n with open(local_filename, \"w\") as file:\n file.write(data)\n # self.handle_table[chunk_id] = local_filename",
"def writeData( self, file, bAddBeginOfDataChunk = True ):\n self.writeSpecificData( file, self.data, bAddBeginOfDataChunk = bAddBeginOfDataChunk )",
"def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()",
"def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)",
"def write(self, filename, data):\n raise NotImplementedError",
"def write(self, filename, data, hdr):\n pass",
"def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)",
"def writePaperData(docId, pubmedMeta, fulltextData, outDir):\n if TEST_OUTPUT:\n printFileHash(fulltextData, pubmedMeta)\n return\n pubmedMeta, warnMsgs = storeFilesNoZip(docId, pubmedMeta, fulltextData, outDir)\n oldHandler = signal.signal(signal.SIGINT, ignoreCtrlc)\n writeMeta(outDir, pubmedMeta, fulltextData)\n addStatus = ''\n if 'status' in fulltextData:\n addStatus = fulltextData['status']\n crawlerName = fulltextData['crawlerName']\n docIdStatus = 'OK\\t%s\\t%s %s, %d files\\t%s' % (crawlerName,\n pubmedMeta['journal'],\n pubmedMeta['year'],\n len(fulltextData),\n addStatus)\n writeDocIdStatus(outDir, docId, docIdStatus, ';'.join(warnMsgs))\n signal.signal(signal.SIGINT, oldHandler)",
"def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)",
"def _write_section_values(section_data, fobj):\n\n # Order is significant.\n section_dict = OrderedDict()\n section_dict['Armor'] = section_data.get('armor')\n section_dict['Internals'] = section_data.get('internals')\n section_dict['Rear'] = section_data.get('rear')\n section_dict['Config'] = section_data.get('config')\n\n for name, value in section_dict.items():\n if not value:\n continue\n val_str = \" {name:<14} {{ {value} }}\\n\".format(\n name=name, value=value)\n fobj.write(val_str)",
"def _writeSection(self, sectionName, options):\n return True",
"def write_data(data, file_name):\n file_path = pathlib.Path(__file__).parent / f'data/{file_name}'\n with open(file_path, 'w') as file:\n json.dump(data, file, indent=2)"
]
| [
"0.63379085",
"0.62890714",
"0.61172116",
"0.6040453",
"0.60129803",
"0.600598",
"0.5906952",
"0.59036005",
"0.5861724",
"0.58348304",
"0.58348304",
"0.58348304",
"0.58348304",
"0.58348304",
"0.58348304",
"0.579581",
"0.5784512",
"0.57745516",
"0.57627445",
"0.5730551",
"0.57143617",
"0.569936",
"0.5696623",
"0.5689743",
"0.56834126",
"0.5666042",
"0.5658261",
"0.5653685",
"0.5647829",
"0.56400925"
]
| 0.70789695 | 0 |
Write given data to the .data directory under a specified section. This method is a handy shortcut for writing into `.data/`, such that you don't have to generate the path yourself. Updates the wheel record, if the record is being kept. | def writestr_data(self, section: str,
zinfo_or_arcname: Union[ZipInfo, str],
data: Union[bytes, str]) -> None:
self._check_section(section)
arcname = (
zinfo_or_arcname.filename
if isinstance(zinfo_or_arcname, ZipInfo)
else zinfo_or_arcname
)
arcname = self._distinfo_path(section + '/' + arcname.lstrip('/'),
kind='data')
self.writestr(arcname, data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_data(self, filename: Union[str, Path],\n section: str, arcname: Optional[str] = None,\n *, recursive: bool = True, resolve: bool = True) -> None:\n self._check_section(section)\n\n if isinstance(filename, str):\n filename = Path(filename)\n if arcname is None:\n arcname = filename.name\n\n arcname = self._distinfo_path(section + '/' + arcname.lstrip('/'),\n kind='data')\n\n self.write(filename, arcname, recursive=recursive, resolve=resolve)",
"def WriteData(self, name, data):\n tempname = os.path.join(self._output_dir, '_%s' % name)\n handle = open(tempname, 'w')\n self.WriteFile(handle, data)\n handle.close()\n os.rename(tempname,\n os.path.join(self._output_dir, name))",
"def write_data():",
"def setSection(self, section, item, data):\n if not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, item, data)\n # Write the updated file whenever anything changes\n with open(self.filename, 'w') as configfile:\n self.config.write(configfile)",
"def write(self, data_pref)\n\n def _writeToAddama(self, addama_dir):",
"def overwrite(section: str, data: any) -> None:\n\toverwriteDict[section] = data\n\tlogger.debug(f'Overwritten config {section}!')",
"def write_data(self, data: Dict):\n raise NotImplementedError",
"def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def save_data(data: str, data_name: str):\n with open(config_path / data_name, \"w\") as f:\n f.write(data)",
"def write_block_data(self, addr, reg, data):\n raise NotImplementedError()",
"def exposed_write_data(self, chunk_id, data):\n local_filename = self.chunk_filename(chunk_id)\n with open(local_filename, \"w\") as file:\n file.write(data)\n # self.handle_table[chunk_id] = local_filename",
"def save_data(self, speaker_name, data):\n if not os.path.exists(self.data_dir):\n os.makedirs(self.data_dir)\n \n path = os.path.join(self.data_dir, speaker_name + \".pkl\")\n np.array(data).dump(path)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def set_data(self, data_name, data, scratch_dir=None):\n if data_name == 'fcdoc':\n def _save_fct(doc, path):\n doc.saveAs(path)\n self.serial_fcdoc = store_serial(data, _save_fct, 'fcstd', scratch_dir=scratch_dir)\n\n elif data_name == 'mesh' or data_name == 'rmf':\n import fenics as fn\n\n def _save_fct(data, path):\n fn.File(path) << data\n if data_name == 'mesh':\n self.serial_mesh = store_serial(data, _save_fct, 'xml', scratch_dir=scratch_dir)\n if data_name == 'rmf':\n self.serial_region_marker = store_serial(data, _save_fct, 'xml',\n scratch_dir=scratch_dir)\n\n else:\n raise ValueError(str(data_name) + ' was not a valid data_name.')",
"def write_word_data(self, addr, reg, data):\n raise NotImplementedError()",
"def write_data(self):\n\n data_string = \"%s, %d, %d, %s\\n\" % (\n self.name, self.repos, self.members, self.created)\n\n file_path = os.path.join(os.path.dirname(__file__), self.data_file)\n\n with open(os.path.abspath(file_path), 'a') as f:\n f.write(data_string)",
"def set_data(self, path, data, owner='*'):\n import msgpack\n with self.open(path, mode='w', owner=owner, rev=0) as fp:\n fp.content_type = 'application/msgpack'\n fp.write(msgpack.packb(data, use_bin_type=True))",
"def write_data(self, data, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n if len(data.shape) == 1: data = data.reshape((data.shape[0],1))\n with open(path,'w') as f:\n for k in range(data.shape[0]):\n f.write(''.join(\n [str(data[k,l]).rjust(length) for l in range(data.shape[1])]\n ) + '\\n')",
"def write_config(self, fname, data):\n with open(fname, 'w') as fhandle:\n fhandle.write(data)",
"def write_data(data, file_name):\n file_path = pathlib.Path(__file__).parent / f'data/{file_name}'\n with open(file_path, 'w') as file:\n json.dump(data, file, indent=2)",
"def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()",
"def writeData(self, dataDict):\n pass",
"def write_data():\n\n data_location = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", DATA_DIR))\n\n sbi_file_name = os.path.join(data_location, SBI_FILE)\n\n sbi = SbiInfo(sbi_file_name)\n\n # the test file is stored in the same directory as the script\n test_file = os.path.splitext(os.path.join(os.path.dirname(__file__), SBI_FILE))[0] + \".pkl\"\n _logger.info(\"Writing header object to {}\".format(os.path.join(os.path.dirname(__file__),\n test_file)))\n sbi.data.to_pickle(test_file)",
"def writePaperData(docId, pubmedMeta, fulltextData, outDir):\n if TEST_OUTPUT:\n printFileHash(fulltextData, pubmedMeta)\n return\n pubmedMeta, warnMsgs = storeFilesNoZip(docId, pubmedMeta, fulltextData, outDir)\n oldHandler = signal.signal(signal.SIGINT, ignoreCtrlc)\n writeMeta(outDir, pubmedMeta, fulltextData)\n addStatus = ''\n if 'status' in fulltextData:\n addStatus = fulltextData['status']\n crawlerName = fulltextData['crawlerName']\n docIdStatus = 'OK\\t%s\\t%s %s, %d files\\t%s' % (crawlerName,\n pubmedMeta['journal'],\n pubmedMeta['year'],\n len(fulltextData),\n addStatus)\n writeDocIdStatus(outDir, docId, docIdStatus, ';'.join(warnMsgs))\n signal.signal(signal.SIGINT, oldHandler)",
"def writeSpecificData( self, file, data, bAddBeginOfDataChunk = True ):\n if( not isinstance( data, np.ndarray ) ):\n data = np.array( data, dtype = self.dataType )\n if( bAddBeginOfDataChunk ):\n file.write( \"data\" )\n file.write( struct.pack( \"I\", len(data)*self.nNbrBitsPerSample/8 ) )\n data.tofile( file )"
]
| [
"0.67369294",
"0.62428963",
"0.6207554",
"0.61807334",
"0.61209357",
"0.6105475",
"0.60803586",
"0.60592484",
"0.6040712",
"0.6040712",
"0.6040712",
"0.6040712",
"0.6040712",
"0.6040712",
"0.59455603",
"0.59368366",
"0.5905411",
"0.58971053",
"0.588145",
"0.58433807",
"0.5820484",
"0.5815168",
"0.5813575",
"0.5800343",
"0.5775842",
"0.57675445",
"0.5747156",
"0.5735647",
"0.57351947",
"0.57326096"
]
| 0.6391121 | 1 |
Write a file to the `.distinfo` directory in the wheel. This is a shorthand for `write(...)` with `arcname` prefixed with the `.distinfo` path. It also ensures that the metadata files critical to the wheel correctness (i.e. the ones written into the archive on `close()`) aren't being pre-written. | def write_distinfo(self, filename: Union[str, Path],
arcname: Optional[str] = None,
*, recursive: bool = True, resolve: bool = True) -> None:
if resolve and arcname is None:
arcname = resolved(filename)
elif arcname is None:
arcname = str(filename)
if arcname == '':
raise ProhibitedWriteError(
"Empty arcname - write would result in duplicating zip entry "
"for .dist-info directory."
)
if arcname in {"WHEEL", "METADATA", "RECORD"}:
raise ProhibitedWriteError(
f"Write would result in a duplicated metadata file: {arcname}."
)
arcname = self._distinfo_path(arcname)
self.write(filename, arcname=arcname, recursive=recursive) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_releaseinfo_file(projname, relinfo_str):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print 'updating releaseinfo.py for %s' % projname\n with open('releaseinfo.py', 'w') as f:\n f.write(relinfo_str)",
"def create_meta(prefix, dist, info_dir, extra_info):\n # read info/index.json first\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n # add extra info\n meta.update(extra_info)\n # write into <prefix>/conda-meta/<dist>.json\n meta_dir = join(prefix, 'conda-meta')\n if not isdir(meta_dir):\n os.makedirs(meta_dir)\n with open(join(meta_dir, 'history'), 'w') as fo:\n fo.write('')\n with open(join(meta_dir, dist + '.json'), 'w') as fo:\n json.dump(meta, fo, indent=2, sort_keys=True)",
"def write_tarball(args, tarfilename, archivefiles=[]):\n if not archivefiles:\n return None\n \n manifest_filename, manifest_uuid = render_manifest(args, archivefiles)\n try:\n with tarfile.open(tarfilename, f\"{FILE_FLAG}:gz\") as tarball:\n file_count = 0\n for fname in archivefiles:\n LOG.debug(f\"Adding {fname} to {tarfilename}: \")\n if fname.endswith(\".csv\"):\n upload_name = f\"{manifest_uuid}_openshift_usage_report.{file_count}.csv\"\n tarball.add(fname, arcname=upload_name)\n file_count += 1\n tarball.add(manifest_filename, arcname=\"manifest.json\")\n except FileExistsError as exc:\n LOG.critical(exc)\n sys.exit(2)\n LOG.info(f\"Wrote: {tarfilename}\")\n return f\"{tarfilename}\"",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def _add_to_archive(archive, filename, contents, tstamp):\n fileinfo = zipfile.ZipInfo(filename, tstamp)\n fileinfo.external_attr = 0666 << 16L\n archive.writestr(fileinfo, contents, zipfile.ZIP_DEFLATED)",
"def freeze_distribution(dist_name, dist_version, **attrs):\n for key in attrs.iterkeys():\n if key not in DIST_META_KEYS:\n raise AttributeError(\"unexpected keyword argument '%s'.\" % key)\n\n try:\n dist_meta = import_dist_meta(dist_name, dist_version)\n dist_meta.update(attrs)\n except ImportError:\n raise MetadataError(\"metadata of '%s-%s' not found.\" % \\\n (dist_name, dist_version))\n\n dist_files = []\n dist_scheme = {}\n\n for key in SCHEME_KEYS:\n location, outfiles = freeze_dist_section(key, dist_meta)\n dist_files.extend(outfiles)\n dist_scheme[key] = location\n\n return dist_scheme, dist_files",
"def write_to_file(info: List[str]) -> None:\n return",
"def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )",
"def write_data(self, filename: Union[str, Path],\n section: str, arcname: Optional[str] = None,\n *, recursive: bool = True, resolve: bool = True) -> None:\n self._check_section(section)\n\n if isinstance(filename, str):\n filename = Path(filename)\n if arcname is None:\n arcname = filename.name\n\n arcname = self._distinfo_path(section + '/' + arcname.lstrip('/'),\n kind='data')\n\n self.write(filename, arcname, recursive=recursive, resolve=resolve)",
"def close(self) -> None:\n\n if self.closed:\n return\n\n if 'r' not in self.mode:\n if self.metadata is not None:\n self.writestr(self._distinfo_path(\"METADATA\"),\n str(self.metadata).encode())\n if self.wheeldata is not None:\n self.writestr(self._distinfo_path(\"WHEEL\"),\n str(self.wheeldata).encode())\n self._zip.writestr(self._distinfo_path(\"RECORD\"),\n str(self.record).encode())\n\n self._zip.close()",
"def manifest_to_file(file_name, name_map):\n out_file = open(file_name, 'wb')\n try:\n write_file_manifest(name_map, out_file)\n finally:\n out_file.close()",
"def write(name, keyword, domain, citation, author, description, species, version, contact, license, values,\n functions, output, value_prefix):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n functions=functions,\n file=output,\n value_prefix=value_prefix\n )",
"def write(self, filename, arcname=None, compress_type=None):\n filename = str(filename)\n if arcname is None:\n arcname = os.getcwd()\n\n shutil.copy(filename, self.folder + arcname)",
"def generate_metadata(install_req):\n # type: (InstallRequirement) -> str\n assert install_req.pep517_backend is not None\n build_env = install_req.build_env\n backend = install_req.pep517_backend\n\n # NOTE: This needs to be refactored to stop using atexit\n metadata_tmpdir = TempDirectory(kind=\"modern-metadata\")\n atexit.register(metadata_tmpdir.cleanup)\n\n metadata_dir = metadata_tmpdir.path\n\n with build_env:\n # Note that Pep517HookCaller implements a fallback for\n # prepare_metadata_for_build_wheel, so we don't have to\n # consider the possibility that this hook doesn't exist.\n runner = runner_with_spinner_message(\"Preparing wheel metadata\")\n with backend.subprocess_runner(runner):\n distinfo_dir = backend.prepare_metadata_for_build_wheel(\n metadata_dir\n )\n\n return os.path.join(metadata_dir, distinfo_dir)",
"def writestr(self,\n zinfo_or_arcname: Union[ZipInfo, str],\n data: Union[bytes, str]) -> None:\n\n # XXX: ZipFile.writestr() does not normalize arcpaths the same way\n # ZipFile.write() does, and so this method won't do that either\n\n arcname = (\n zinfo_or_arcname.filename\n if isinstance(zinfo_or_arcname, ZipInfo)\n else zinfo_or_arcname\n )\n\n self._zip.writestr(zinfo_or_arcname, data)\n self.refresh_record(arcname)",
"def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)",
"def dist(c, wheel=False):\n commands = \"sdist\" if not wheel else \"sdist bdist_wheel\"\n c.run(f\"python {SETUP_FILE} {commands}\")",
"def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")",
"def post_extract(env_name='root'):\n prefix = prefix_env(env_name)\n info_dir = join(prefix, 'info')\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n dist = '%(name)s-%(version)s-%(build)s' % meta\n if FORCE:\n run_script(prefix, dist, 'pre-unlink')\n link(prefix, dist, linktype=None)\n shutil.rmtree(info_dir)",
"def write_file(name, text, opts):\n fname = path.join(opts.destdir, \"%s.%s\" % (name, opts.suffix))\n if opts.dryrun:\n print 'Would create file %s.' % fname\n return\n if not opts.force and path.isfile(fname):\n print 'File %s already exists, skipping.' % fname\n else:\n print 'Creating file %s.' % fname\n f = open(fname, 'w')\n try:\n f.write(text)\n finally:\n f.close()",
"def make_readme_txt(self, args):\n with open(self.readme_txt, 'w') as writer:\n log.info(\"args=%s\\n\", args)\n writer.write(\"# Created by pbtranscript-internal-validation.ValidationRunner.make_readme_txt()\\n\")\n writer.write(\"args=%s\\n\\n\" % args)\n\n files = self.common_files + self.collapse_human_files + self.reseq_human_files + self.sirv_files\n for desc, fn in files:\n if op.exists(fn):\n writer.write(\"%s=%s\\n\" % (desc, fn))",
"def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)",
"def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)",
"def repack_wheel(data: bytes):\n new_data = BytesIO()\n with ZipFile(BytesIO(data)) as existing_zip:\n with ZipFile(new_data, mode=\"w\") as new_zip:\n for zipinfo in existing_zip.infolist():\n if re.search(r\"pip-.+\\.dist-info/\", zipinfo.filename):\n continue\n new_zip.writestr(zipinfo, existing_zip.read(zipinfo))\n\n return new_data.getvalue()",
"def write(self,\n filename: Union[str, Path],\n arcname: Optional[str] = None,\n *, recursive: bool = True, resolve: bool = True) -> None:\n if resolve and arcname is None:\n arcname = resolved(filename)\n self._write_to_zip(filename, arcname)\n\n if recursive:\n common_root = str(filename)\n root_arcname = arcname\n for root, dirs, files in os.walk(filename):\n # For reproducibility, sort directories, so that os.walk\n # traverses them in a defined order.\n dirs.sort()\n\n dirs = [d + '/' for d in dirs]\n for name in sorted(dirs + files):\n filepath = os.path.join(root, name)\n arcpath = self._os_walk_path_to_arcpath(\n common_root, root, name, root_arcname\n )\n self._write_to_zip(filepath, arcpath)",
"def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in _vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))",
"def perform_register(path, file_name):\n subprocess.call(\n [sys.executable, 'setup.py', 'sdist', 'bdist_wheel'], cwd=path)\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '.tar.gz')])\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '-py3-none-any.whl')])",
"def run(self):\n if self.formats != [\"gztar\"] and self.formats != [\"zip\"]:\n print(\"'setup.py sdist' unsupported format.\")\n sys.exit(1)\n\n if glob.glob(\"*.tar.gz\"):\n print(\"'setup.py sdist' remove existing *.tar.gz files from \"\n \"source directory.\")\n sys.exit(1)\n\n command = \"make dist\"\n exit_code = subprocess.call(command, shell=True)\n if exit_code != 0:\n raise RuntimeError(\"Running: {0:s} failed.\".format(command))\n\n if not os.path.exists(self.dist_dir):\n os.mkdir(self.dist_dir)\n\n source_package_file = glob.glob(\"*.tar.gz\")[0]\n source_package_prefix, _, source_package_suffix = (\n source_package_file.partition(\"-\"))\n sdist_package_file = \"{0:s}-python-{1:s}\".format(\n source_package_prefix, source_package_suffix)\n sdist_package_file = os.path.join(self.dist_dir, sdist_package_file)\n os.rename(source_package_file, sdist_package_file)\n\n # Create and add the PKG-INFO file to the source package.\n with gzip.open(sdist_package_file, \"rb\") as input_file:\n with open(sdist_package_file[:-3], \"wb\") as output_file:\n shutil.copyfileobj(input_file, output_file)\n os.remove(sdist_package_file)\n\n self.distribution.metadata.write_pkg_info(\".\")\n pkg_info_path = \"{0:s}-{1:s}/PKG-INFO\".format(\n source_package_prefix, source_package_suffix[:-7])\n with tarfile.open(sdist_package_file[:-3], \"a:\") as tar_file:\n tar_file.add(\"PKG-INFO\", arcname=pkg_info_path)\n os.remove(\"PKG-INFO\")\n\n with open(sdist_package_file[:-3], \"rb\") as input_file:\n with gzip.open(sdist_package_file, \"wb\") as output_file:\n shutil.copyfileobj(input_file, output_file)\n os.remove(sdist_package_file[:-3])\n\n # Convert the .tar.gz into a .zip\n if self.formats == [\"zip\"]:\n zip_sdist_package_file = \"{0:s}.zip\".format(sdist_package_file[:-7])\n\n with tarfile.open(sdist_package_file, \"r|gz\") as tar_file:\n with zipfile.ZipFile(\n zip_sdist_package_file, \"w\", zipfile.ZIP_DEFLATED) as zip_file:\n for tar_file_entry in tar_file:\n file_entry = tar_file.extractfile(tar_file_entry)\n if tar_file_entry.isfile():\n modification_time = datetime.datetime.fromtimestamp(\n tar_file_entry.mtime)\n zip_modification_time = (\n modification_time.year, modification_time.month,\n modification_time.day, modification_time.hour,\n modification_time.minute, modification_time.second)\n zip_info = zipfile.ZipInfo(\n date_time=zip_modification_time,\n filename=tar_file_entry.name)\n zip_info.external_attr = (tar_file_entry.mode & 0xff) << 16\n\n file_data = file_entry.read()\n zip_file.writestr(zip_info, file_data)\n\n os.remove(sdist_package_file)\n sdist_package_file = zip_sdist_package_file\n\n # Inform distutils what files were created.\n dist_files = getattr(self.distribution, \"dist_files\", [])\n dist_files.append((\"sdist\", \"\", sdist_package_file))",
"def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()",
"def _save_metadata(self, search_name):\r\n with open_(self.output_path / \"metadata\", \"a\") as f:\r\n f.write(\r\n f\"\"\"name={self.name}\r\n non_linear_search={search_name}\r\n \"\"\"\r\n )"
]
| [
"0.62034583",
"0.58835083",
"0.5526442",
"0.5514737",
"0.55070674",
"0.549292",
"0.5466621",
"0.5440966",
"0.53879744",
"0.538644",
"0.5340823",
"0.5244212",
"0.5236637",
"0.52094686",
"0.52030265",
"0.51610017",
"0.5137117",
"0.5121621",
"0.50919354",
"0.50608385",
"0.50367254",
"0.5026365",
"0.5013897",
"0.50089407",
"0.49855325",
"0.49810913",
"0.49778345",
"0.49754465",
"0.49698415",
"0.4944445"
]
| 0.7784242 | 0 |
Get an absolute path at a given relative depth | def __getLibAbsPath(currentPath, depth):
libPath = currentPath
while depth:
libPath = os.path.split(libPath)[0]
depth -= 1
return libPath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path_depth(path):\n parts = os.path.dirname(path).split('/')\n parts = [part for part in parts if part != '']\n length = len(parts)\n return length",
"def dir_by_levels(path, levels):\n return op.abspath(op.join(path, *(['..'] * levels)))",
"def dirname_recurse(filepath: str, depth: int) -> str:\n for _ in range(depth):\n filepath = os.path.dirname(filepath)\n return filepath",
"def Depth(self):\n return self.path.count('.') + (self.parent is not None)",
"def relativePath(path):\n spath = pathSplit(path)\n return spath[-1]",
"def abs_path(self) -> str:\n full_path = '/'.join(folder.name for folder in reversed(self.ancestors))\n return f'/{full_path}/'",
"def resolve_level(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n pl = [\".\"]\n for i in range(0, this_idx - target_idx):\n pl.append(\"..\")\n return os.path.join(*pl)",
"def depth_of(self, path):\n new_path = self.relative(path)\n final_path = self.join(new_path)\n if isfile(final_path, exists(final_path)):\n new_path = dirname(new_path)\n\n new_path = new_path.rstrip('/')\n new_path = \"{0}/\".format(new_path)\n return new_path.count(os.sep)",
"def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result",
"def expand_path(path, start_path, base_depth):\n path = path.replace(start_path, '')\n path = os.path.normpath(path)\n pathdepth = path_depth(path)\n path = path.replace('../', '', base_depth)\n if base_depth > pathdepth:\n for _ in range(base_depth - path_depth(path)):\n path = '_/' + path\n return path",
"def get_path(path, parent=None, prj=None):\n if prj is None:\n prj = QgsProject.instance()\n\n if parent is None:\n parent = prj.layerTreeRoot()\n\n if path is None:\n return parent\n if not isinstance(path, (list, tuple)):\n path = path.split(\"/\")\n\n for part in path:\n if len(path) > 0:\n parent = get_group(part, parent)\n\n return parent",
"def path_child(path):\n return path_to_str(parse_path(path)[1:])",
"def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):",
"def get_relative_path(dir, full_path):\n if dir[-1] == '/' or dir[-1] == ':':\n return full_path[ len(dir) : ]\n else:\n return full_path[ len(dir)+1 : ]",
"def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")",
"def get_full_path(path, *args):\n\n return os.path.join(_search_parent_dir(\".wit\"), *args, path)",
"def getAbsPath(*p):\n\tfrom os.path import abspath, join\n\tif len(p) >= 1:\n\t\treturn normalizePath(join(abspath(p[0]), *p))\n\treturn \"\"",
"def findShortestPath(self):\r\n pass",
"def relatif (path, root = None):\n\tfrom os import sep, getcwd\n\tpath = normalizePath(path)\n\tif root != None:\n\t\troot =normalizePath(root)\n\t# If the path is empty\n\tif len(path) == 0:\n\t\treturn \"\"\n\n\t# If the root is not defined\n\tif root == None:\n\t\t# Take the current directory\n\t\troot = getcwd()\n\t\t\n\t# Cut paths to directory\n\tif path[-1] == sep:\n\t\tpath = path[:-1]\n\tspPath = path.split(sep)\n\tspRoot = root.split(sep)\n\n\t# Constructs the list of the identical path\n\tequal = []\n\tfor i in range(0,mini(len(spRoot),len(spPath))):\n\t\tif spRoot[i] != spPath[i]:\n\t\t\tbreak\n\t\telse:\n\t\t\tequal.append(spPath[i])\n\n\t# If the identical list is not empty\n\tif len(equal) != 0:\n\t\t# Remove identical paths \n\t\tspRoot = spRoot[len(equal):]\n\t\tspPath = spPath[len(equal):]\n\t\t\n\t\t# Add an indirection\n\t\tfor i in range(len(spRoot)):\n\t\t\tspPath.insert(0,\"..\")\n\n\t# Constructs the relative path\n\tresult = \"\"\n\tfor i in spPath:\n\t\tresult += i + sep\n\n\tif result != \"\":\n\t\treturn result[:-1]\n\telse:\n\t\treturn \"\"",
"def _absolute_root(path: _Path) -> str:\n path_ = Path(path)\n parent = path_.parent\n\n if path_.exists():\n return str(path_.resolve())\n else:\n return str(parent.resolve() / path_.name)",
"def path(self):\n p = self\n\n name = [p.name()]\n offsets = set([p._offset])\n while p.has_parent_key():\n p = p.parent_key()\n if p._offset in offsets:\n name.append(\"[path cycle]\")\n break\n name.append(p.name())\n offsets.add(p._offset)\n return '\\\\'.join(reversed(name))",
"def test_relativise_different_parents_deep():\n src = pathlib.Path(\"/tmp/foo/bar1/bar2/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/baz1/baz2/baz3/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../baz1/baz2/baz3/dst.txt\")",
"def _find_relative(self, spec):\n if spec.template_rel_path is not None:\n return os.path.split(spec.template_rel_path)\n # Otherwise, determine the file name separately.\n\n locator = self.loader._make_locator()\n\n # We do not use the ternary operator for Python 2.4 support.\n if spec.template_name is not None:\n template_name = spec.template_name\n else:\n template_name = locator.make_template_name(spec)\n\n file_name = locator.make_file_name(template_name, spec.template_extension)\n\n return (spec.template_rel_directory, file_name)",
"def get_level_path(target_level, cwd=None):\n if cwd is None:\n cwd = os.getwd()\n q = \"\"\n for ll in levels:\n q = os.path.join(q, get_level_name(ll, cwd))\n if ll == target_level:\n break\n return q",
"def constructShortestPath(self):",
"def get_relative_path(path):\r\n components = split_all(path)\r\n if len(components) <= 1:\r\n return os.curdir\r\n else:\r\n parents = [os.pardir] * (len(components) - 1)\r\n return os.path.join(*parents)",
"def relName(path, cwd=None, root=None):\n relRoot = os.path.normpath((root or projectRoot)) + os.sep\n cwd = os.path.abspath((cwd or os.getcwd())) + os.sep\n if path == cwd or path == cwd[:-1]:\n return \".\"\n\n if path.startswith(cwd):\n # The relative name is below the CWD, so we simply strip off the\n # leading parts.\n return path[len(cwd):]\n\n if path.startswith(relRoot) and cwd.startswith(relRoot):\n # The path is below the nominal root but parallel to the CWD. We need\n # to add some '../' parts.\n relToRootPath = path[len(relRoot):]\n relToRootCWD = cwd[len(relRoot):-1]\n count = 0\n while count < 1000 and relToRootCWD and relToRootCWD != os.sep:\n relToRootCWD, b = os.path.split(relToRootCWD)\n relToRootPath = \"..\" + os.sep + relToRootPath\n assert count < 1000\n return relToRootPath\n\n return path",
"def flatten_path(path):\n return path.split(\"/\")[-1]",
"def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)",
"def cfgpath(p):\n p = Path(p)\n if p.is_absolute():\n return p\n else:\n for d in reversed(cfgdirs):\n try:\n fp = (d / p).resolve()\n except FileNotFoundError:\n continue\n if fp.is_file():\n return fp\n else:\n return p"
]
| [
"0.64539737",
"0.6346503",
"0.62048346",
"0.6162363",
"0.6061876",
"0.6044649",
"0.5989426",
"0.5930455",
"0.5924277",
"0.5897604",
"0.58891785",
"0.58871776",
"0.58747077",
"0.58283246",
"0.5806055",
"0.5762614",
"0.5754181",
"0.5748785",
"0.5744405",
"0.57313746",
"0.57048595",
"0.5692127",
"0.567218",
"0.5669338",
"0.56674045",
"0.565917",
"0.5639465",
"0.5621801",
"0.5621131",
"0.56147766"
]
| 0.6351144 | 1 |
Init lib path: append the lib paths to the Python path. | def initLibPath():
libHash = {
'Framework': 1,
'UserControlleLib': 1,
'CaseLib': 1
}
binPath = os.path.split(os.path.realpath(__file__))[0]
for key in libHash:
sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setSysPath():\n c = os.path.abspath(os.path.dirname(__file__))\n\n add = [\n ['lib'],\n ]\n\n for item in add:\n p = os.path.join(c, *item)\n if not p in sys.path:\n sys.path[1:1] = [p]\n\n remove = ['django', 'simplejson']\n\n # Remove unwanted paths\n for item in sys.path:\n for r in remove:\n if item.find(r) > 0:\n sys.path.remove(item)",
"def _setLibraryRoot(self):\n\t\tself._libHome = os.path.abspath(rootDir)",
"def setBEGINLIBPATH():\r\n dllpath = os.path.join(sys.prefix, \"Lib\", \"lib-dynload\")\r\n libpath = os.environ['BEGINLIBPATH'].split(';')\r\n if libpath[-1]:\r\n libpath.append(dllpath)\r\n else:\r\n libpath[-1] = dllpath\r\n os.environ['BEGINLIBPATH'] = ';'.join(libpath)",
"def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path",
"def add_library_path(path):\n if not os.path.isdir(path):\n raise OSError('No such directory: %s' % path)\n gbl.gSystem.AddDynamicPath(path)",
"def set_path():\n import os\n import sys\n\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))",
"def set_library_path(path):\r\n if Config.loaded:\r\n raise Exception(\"library path must be set before before using \" \\\r\n \"any other functionalities in libclang.\")\r\n\r\n Config.library_path = path",
"def set_library():\n print('Path to local library: {}'.format(library.get_path()))\n path = input('Give new absolute path to local library '\n '[leave empty to keep current]:\\n>> ')\n if os.path.isabs(path):\n print('New library path: {}'.format(library.set_path(path)))\n else:\n print('Path not changed.')\n library.store()",
"def library_dirs(self):",
"def update_path():\n\timport sys\n\tsys.path.append(directory_root())",
"def __update_path():\n\n # The first time `dtf` is executed, there is no main.db.\n # As such, we basically need to assume that if main.db\n # doesn't exist, don't do this.\n if not os.path.isfile(DTF_DB):\n return 0\n\n for lib in pm.get_libraries(name_only=True):\n\n lib_path = \"%s/%s\" % (DTF_LIBRARIES_DIR, lib)\n\n sys.path.append(lib_path)\n\n return 0",
"def linking_library_dirs(self):",
"def __init__(self, libpath):\n self._lib = CDLL(libpath)\n self._functions = {}",
"def setup_lib(CLIB):\n # {{ SETUP_LIB }}",
"def addlibdir(dirname):\n if dirname not in sys.path:\n sys.path.append(dirname)",
"def AddThirdPartyLibToPath(lib, override=False):\n libpath = os.path.abspath(os.path.join(BUILD_DIR, 'third_party', lib))\n if override:\n sys.path.insert(0, libpath)\n else:\n sys.path.append(libpath)",
"def static_init(cls):\n for path in sys.path:\n if os.path.isdir(path + \"/support_diagnostics\"):\n ImportModules.base_directory = path + \"/support_diagnostics\"",
"def init_env_path(path=None) -> None:\n if path is None:\n sys.path.insert(1, file_dir_dir())\n else:\n sys.path.insert(1, path)",
"def get_library_dir():\n return os.path.join(get_script_path(), 'library')",
"def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))",
"def _addpath(d, atend=None):\n if atend:\n sys.path = sys.path + [d]\n else:\n sys.path = [d] + sys.path",
"def _addpath(d, atend=None):\n if atend:\n sys.path = sys.path + [d]\n else:\n sys.path = [d] + sys.path",
"def patch_sys_path():\n this_dir = os.path.dirname(__file__)\n to_add = os.path.join(this_dir, \"..\")\n to_add = os.path.abspath(to_add)\n sys.path.insert(0, to_add)",
"def __getLibAbsPath(currentPath, depth):\n libPath = currentPath\n while depth:\n libPath = os.path.split(libPath)[0]\n depth -= 1\n return libPath",
"def getPythonPath():\n python_path = os.environ.get(\"PYTHONPATH\",\"\")\n \n if os.path.basename(os.path.abspath(os.curdir)) == \"Test\":\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"../Lib/external/SQLObject-compat\"),\n os.path.normpath(\"../Lib/external\"),\n os.path.normpath(\"../Lib\"),\n ])\n else:\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"./Lib/external/SQLObject-compat\"),\n os.path.normpath(\"./Lib/external\"),\n os.path.normpath(\"./Lib\"),\n ])\n \n return new_python_path",
"def add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)",
"def _add_path(manifest: dict, lib_folders):\n p = manifest.get('installdir')\n for lib_folder in lib_folders:\n if not p:\n break\n abs_p = lib_folder / STEAM_APPS_INSTALL_FOLDER / p\n if not abs_p.exists():\n continue\n\n manifest['installdir'] = abs_p.as_posix()\n\n # Update absolute path to executable\n if manifest.get('exe_sub_path'):\n # Remove potential leading slashes\n if manifest['exe_sub_path'][0] in ('/', '\\\\'):\n manifest['exe_sub_path'] = manifest['exe_sub_path'][1:]\n manifest['path'] = Path(abs_p / manifest['exe_sub_path']).as_posix()\n else:\n manifest['path'] = abs_p.as_posix()",
"def load_lib():\n root_dir = command.get_base_dirs(bin_dir)[0]\n _bin_dir, lib_dir = command.get_bin_lib_dirs(root_dir)\n magic_so = os.path.join(lib_dir, 'libmagic' + system.lib_ext)\n\n # add lib path to the front of the PATH env var\n new_path = os.pathsep.join([lib_dir, os.environ['PATH']])\n os.environ['PATH'] = new_path\n\n if os.path.exists(magic_so):\n lib = ctypes.CDLL(magic_so)\n if lib and lib._name:\n return lib\n raise ImportError('Failed to load libmagic from %(magic_so)r' % locals())",
"def libRootPath(self):\n path = grapher.binPath\n libFld = None\n if self.rbStudio.isChecked():\n libFld = 'studio'\n path = os.path.join(path, libFld)\n elif self.rbProd.isChecked():\n libFld = 'prod'\n path = os.path.join(path, libFld)\n elif self.rbUsers.isChecked():\n libFld = 'users'\n path = os.path.join(path, libFld, grapher.user[0], grapher.user, 'lib')\n return path, libFld",
"def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path"
]
| [
"0.69243926",
"0.68290377",
"0.68250597",
"0.68074346",
"0.6722081",
"0.66797453",
"0.6677228",
"0.6616815",
"0.6494127",
"0.6485778",
"0.6431461",
"0.63157105",
"0.6290918",
"0.6264477",
"0.61724114",
"0.6129387",
"0.6102272",
"0.6092327",
"0.6057182",
"0.60421085",
"0.60419923",
"0.60419923",
"0.59601986",
"0.59312296",
"0.5915713",
"0.5886731",
"0.5885384",
"0.5883094",
"0.5869792",
"0.5853774"
]
| 0.86582345 | 0 |
Splitting multipart copy upload. Splits copy upload parts into several smaller ones to fit the maximum upload part size limit. Also takes into account the minimum upload part size. | def __init__(self, mpu, min_part_size=5 * MB, max_part_size=5 * GB):
super(SplittingMultipartCopyUpload, self).__init__(mpu)
self._mpu = mpu
self._min_part_size = min_part_size
self._max_part_size = max_part_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)",
"def __init__(self, mpu, original_size, min_part_size, max_part_size):\n super(OutOfBoundsSplittingMultipartCopyUpload, self).__init__(mpu, min_part_size, max_part_size)\n self._original_size = original_size",
"async def _multipart_upload_from_buffer(self):\n # check to see if bucket needs to be created\n if self._create_bucket:\n # check whether the bucket exists\n bucket_list = await self._get_bucket_list()\n if not self._bucket in bucket_list:\n await self._conn_obj.conn.create_bucket(Bucket=self._bucket)\n\n # if the current part is 1 we have to create the multipart upload\n if self._current_part == 1:\n response = await self._conn_obj.conn.create_multipart_upload(\n Bucket = self._bucket,\n Key = self._path\n )\n self._upload_id = response['UploadId']\n # we need to keep a track of the multipart info\n self._multipart_info = {'Parts' : []}\n\n # upload from a buffer - do we need to split into more than one\n # multiparts?\n new_buffer = []\n for buffer_part in range(0, len(self._buffer)):\n # is the current part of the buffer larger than the maximum\n # upload size? split if it is\n data_buf = self._buffer[buffer_part]\n data_len = data_buf.tell()\n if data_len >= self._part_size:\n data_buf.seek(0)\n data_pos = 0\n # split the file up\n while data_pos < data_len:\n new_buffer.append(io.BytesIO())\n # copy the data - don't overstep the buffer\n if data_pos + self._part_size >= data_len:\n sub_data = data_buf.read(data_len-data_pos)\n else:\n sub_data = data_buf.read(\n self._part_size\n )\n new_buffer[-1].write(sub_data)\n # increment to next\n data_pos += self._part_size\n\n # free the old memory\n self._buffer[buffer_part].close()\n else:\n # copy the old buffer into a new one\n self._buffer[buffer_part].seek(0)\n new_buffer.append(io.BytesIO(self._buffer[buffer_part].read()))\n\n # close other buffers first\n for b in self._buffer:\n b.close()\n self._buffer = new_buffer\n\n tasks = []\n\n for buffer_part in range(0, len(self._buffer)):\n # seek in the BytesIO buffer to get to the beginning after the\n # writing\n self._buffer[buffer_part].seek(0)\n # upload here\n # schedule the uploads\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._conn_obj.conn.upload_part(\n Bucket=self._bucket,\n Key=self._path,\n UploadId=self._upload_id,\n PartNumber=self._current_part + buffer_part,\n Body=self._buffer[buffer_part]\n ))\n tasks.append(task)\n\n # await the completion of the uploads\n res = await asyncio.gather(*tasks)\n for buffer_part in range(0, len(self._buffer)):\n # insert into the multipart info list of dictionaries\n part = res[buffer_part]\n self._multipart_info['Parts'].append(\n {\n 'PartNumber' : self._current_part + buffer_part,\n 'ETag' : part['ETag']\n }\n )\n\n # add the total number of uploads to the current part\n self._current_part += len(self._buffer)\n\n # reset all the byte buffers and their positions\n for buffer_part in range(0, len(self._buffer)):\n self._buffer[buffer_part].close()\n self._buffer = [io.BytesIO()]\n self._seek_pos = 0",
"def __init__(self, mpu, original_size, chunk_size, download):\n super(AppendOptimizedCompositeMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._chunk_size = chunk_size\n self._download = download\n self._copy_parts = []\n self._first_chunk = sys.maxint\n self._first_chunk_offset = 0",
"def __init__(self, mpu, length, min_part_number=1):\n super(TruncatingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._length = length\n self._min_part_number = min_part_number",
"def do_part_copy(args):\r\n # Multiprocessing args lameness\r\n src_bucket_name, src_key_name, dest_bucket_name, mpu_id, part_num, start_pos, end_pos = args\r\n logger.debug(\"do_part_copy got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n dest_bucket = s3.lookup(dest_bucket_name)\r\n mpu = None\r\n for mp in dest_bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # make sure we have a valid key\r\n src_bucket = s3.lookup( src_bucket_name )\r\n src_key = src_bucket.get_key( src_key_name )\r\n # Do the copy\r\n t1 = time.time()\r\n mpu.copy_part_from_key(src_bucket_name, src_key_name, part_num, start_pos, end_pos)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = (end_pos - start_pos)/1024./1024.\r\n logger.info(\"Copied part %s (%0.2fM) in %0.2fs at %0.2fMbps\" % (part_num, s, t2, s/t2))",
"def __init__(self, mpu, original_size, download, chunk_size, min_chunk, max_chunk):\n super(ChunkedMultipartUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download\n self._chunk_size = chunk_size\n self._partial_chunks = {}\n self._min_chunk = min_chunk\n self._max_chunk = max_chunk",
"def _buff_split(self, upload_buffer):\n if upload_buffer.intent_count() == 0:\n return\n tail_buffer = upload_buffer\n while True:\n if tail_buffer.length < self.recommended_upload_part_size + self.min_part_size:\n # `EmergePlanner_buff_partition` can split in such way that tail part\n # can be smaller than `min_part_size` - to avoid unnecessary download of possible\n # incoming copy intent, we don't split further\n yield tail_buffer\n return\n head_buff, tail_buffer = self._buff_partition(tail_buffer)\n yield head_buff",
"def _get_copy_parts(self, copy_intent, start_offset, end_offset):\n fragment_length = end_offset - start_offset\n part_count = int(fragment_length / self.max_part_size)\n last_part_length = fragment_length % self.max_part_size\n if last_part_length == 0:\n last_part_length = self.max_part_size\n else:\n part_count += 1\n\n if part_count == 1:\n part_sizes = [last_part_length]\n else:\n if last_part_length < int(fragment_length / (part_count + 1)):\n part_count += 1\n base_part_size = int(fragment_length / part_count)\n size_remainder = fragment_length % part_count\n part_sizes = [\n base_part_size + (1 if i < size_remainder else 0) for i in range(part_count)\n ]\n\n copy_source = copy_intent.outbound_source\n relative_offset = start_offset - copy_intent.destination_offset\n for part_size in part_sizes:\n yield EmergePart(CopyEmergePartDefinition(copy_source, relative_offset, part_size))\n relative_offset += part_size",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def test_super_chunk(self):\n chunksize = MAX_SINGLE_UPLOAD_SIZE + 1\n size = MAX_SINGLE_UPLOAD_SIZE * 2\n self.assertEqual(find_chunksize(size, chunksize),\n MAX_SINGLE_UPLOAD_SIZE)",
"def multipart_push(self, upload_id, url, part_number, chunk_size, data, md5=None):\n path = self.base_path / url\n assert path.is_file(), f\"{self}: multipart upload file {path} does not exist.\"\n with path.open(\"r+b\") as stream:\n stream.seek((part_number - 1) * chunk_size)\n shutil.copyfileobj(data, stream, 1024 * 1024)\n return dict()",
"def upload_chunked(self, chunk_size = 4 * 1024 * 1024):\n\n while self.offset < self.target_length:\n next_chunk_size = min(chunk_size, self.target_length - self.offset)\n if self.last_block == None:\n self.last_block = self.file_obj.read(next_chunk_size)\n\n try:\n (self.offset, self.upload_id) = self.client.upload_chunk(\n StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)\n self.last_block = None\n except ErrorResponse as e:\n reply = e.body\n if \"offset\" in reply and reply['offset'] != 0:\n if reply['offset'] > self.offset:\n self.last_block = None\n self.offset = reply['offset']",
"def chunkFileUpload(self, fp, chunksize=1024 * 4096):\n parts = int(math.ceil(fp.stat().st_size / float(chunksize)))\n err = False\n maxchunksize = 1024 * 1024 * 100\n if chunksize >= maxchunksize:\n print(\n 'not uploaded: defined chunksize {0} is bigger than the allowed maximum {1}'.format(chunksize, maxchunksize))\n return None\n\n part = 0\n for part, chunk in enumerate(self.chunkedread(fp, chunksize),1):\n logger.info('({2})uploading part {0} of {1}'.format(part, parts, fp.name))\n files = {'file': (str(fp.name), chunk)}\n res = self._post(self.fullUrl('/chunked_upload?chunk={0}').format(part), files=files)\n\n print('finish, uploaded part {0} of {1} '.format(part, parts))\n res = self._post(self.fullUrl('chunked_upload/commit?filename={0}'.format(fp.name)))\n return self.getFile(res['file']['selfUrl']), self.getObject(res['relatedObject']['selfUrl'])\n\n # relObj = res['relatedObject']\n # obj = self.getObject(relObj['selfUrl'])\n # return obj",
"async def post_multipart(self, part1, part_2, test):",
"def do_part_upload(args):\r\n # Multiprocessing args lameness\r\n bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries = args\r\n logger.debug(\"do_part_upload got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n s3.is_secure = secure\r\n bucket = s3.lookup(bucket_name)\r\n mpu = None\r\n for mp in bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # Read the chunk from the file\r\n fp = open(fname, 'rb')\r\n fp.seek(start)\r\n data = fp.read(size)\r\n fp.close()\r\n if not data:\r\n raise Exception(\"Unexpectedly tried to read an empty chunk\")\r\n\r\n def progress(x,y):\r\n logger.debug(\"Part %d: %0.2f%%\" % (i+1, 100.*x/y))\r\n\r\n try:\r\n # Do the upload\r\n t1 = time.time()\r\n mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = len(data)/1024./1024.\r\n logger.info(\"Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps\" % (i+1, s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries)",
"def multipart(self):\n self.add_file_string('Multipart file')\n self.should_copy = False",
"def upload_part_copy(Bucket=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, CopySourceRange=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None):\n pass",
"def split(self):\n\n print 'Splitting file', self.__filename\n print 'Number of chunks', self.__numchunks, '\\n'\n \n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n\n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n\n chunksz = self.__chunksize\n total_bytes = 0\n\n for x in range(self.__numchunks):\n chunkfilename = bname + '-' + str(x+1) + self.__postfix\n\n # if reading the last section, calculate correct\n # chunk size.\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n\n try:\n print 'Writing file',chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print 'Done.'",
"def create_part_copy(self, object_name, offset, size, multipart_id, part_number):\n\n return h3lib.create_part_copy(self._handle, object_name, offset, size, multipart_id, part_number, self._user_id)",
"def upload_chunk(self, request, **kwargs):\n import uuid\n\n self.method_check(request, allowed=[\"post\"])\n self.is_authenticated(request)\n\n if not self.check_dfo(request, kwargs[\"dfo_id\"]):\n return self.handle_error(\"Invalid object or access denied.\")\n\n checksum = request.headers.get(\"Checksum\", None)\n if checksum is None:\n checksum = request.META.get(\"Checksum\", None)\n if checksum is None:\n return self.handle_error(\"Missing 'Checksum' in header.\")\n\n content_range = request.headers.get(\"Content-Range\", None)\n if content_range is None:\n content_range = request.META.get(\"Content-Range\", None)\n if content_range is None:\n return self.handle_error(\"Missing 'Content-Range' in header.\")\n\n m = re.search(r\"^(\\d+)\\-(\\d+)\\/(\\d+)$\", content_range).groups()\n content_start = int(m[0])\n content_end = int(m[1])\n content_length = content_end-content_start\n if content_length > settings.CHUNK_MAX_SIZE:\n return self.handle_error(\"Chunk size is larger than max allowed.\")\n\n check = Chunk.objects.filter(\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start\n )\n if len(check) != 0:\n return self.handle_error(\"Chunk already uploaded.\")\n\n content_checksum = calc_checksum(settings.CHUNK_CHECKSUM, request.body)\n if content_checksum is None or content_checksum != checksum:\n return self.handle_error(\n \"Checksum does not match. {}:{}\".format(settings.CHUNK_CHECKSUM, content_checksum))\n\n if not os.path.exists(settings.CHUNK_STORAGE):\n try:\n os.mkdir(settings.CHUNK_STORAGE)\n except Exception as e:\n return self.handle_error(str(e))\n\n data_path = os.path.join(settings.CHUNK_STORAGE, kwargs[\"dfo_id\"])\n if not os.path.exists(data_path):\n try:\n os.makedirs(data_path, mode=0o770, exist_ok=True)\n os.chmod(data_path, 0o770)\n except Exception as e:\n return self.handle_error(str(e))\n\n chunk_id = str(uuid.uuid4())\n file_path = os.path.join(data_path, chunk_id)\n\n try:\n file = open(file_path, \"wb\")\n file.write(request.body)\n file.close()\n except Exception as e:\n return self.handle_error(str(e))\n\n dfo = DataFileObject.objects.get(id=kwargs[\"dfo_id\"])\n\n instrument = dfo.datafile.dataset.instrument\n if instrument is not None:\n instrument_id = instrument.id\n else:\n instrument_id = None\n\n try:\n chunk = Chunk.objects.create(\n chunk_id=chunk_id,\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start,\n size=content_length,\n instrument_id=instrument_id,\n user_id=request.user.id\n )\n except Exception as e:\n try:\n os.remove(file_path)\n except Exception as e:\n pass\n return self.handle_error(str(e))\n\n data = {\n \"success\": True,\n \"id\": chunk.id\n }\n\n return JsonResponse(data, status=200)",
"def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks",
"def _get_upload_part(self, upload_buffer):\n if upload_buffer.intent_count() == 1 and upload_buffer.get_intent(0).is_upload():\n intent = upload_buffer.get_intent(0)\n relative_offset = upload_buffer.start_offset - intent.destination_offset\n length = upload_buffer.length\n definition = UploadEmergePartDefinition(intent.outbound_source, relative_offset, length)\n else:\n subparts = []\n fragment_start = upload_buffer.start_offset\n for intent, fragment_end in upload_buffer.iter_items():\n relative_offset = fragment_start - intent.destination_offset\n length = fragment_end - fragment_start\n if intent.is_upload():\n subpart_class = LocalSourceUploadSubpart\n elif intent.is_copy():\n subpart_class = RemoteSourceUploadSubpart\n else:\n raise RuntimeError('This cannot happen!!!')\n subparts.append(subpart_class(intent.outbound_source, relative_offset, length))\n fragment_start = fragment_end\n definition = UploadSubpartsEmergePartDefinition(subparts)\n return EmergePart(definition)",
"def _upload_part(\n self,\n bucket_id,\n file_id,\n part_upload_source: _TypeUploadSource,\n part_number,\n large_file_upload_state,\n finished_parts,\n encryption: EncryptionSetting,\n ):\n\n # b2_upload_part doesn't need SSE-B2. Large file encryption is decided on b2_start_large_file.\n if encryption is not None and encryption.mode == EncryptionMode.SSE_B2:\n encryption = None\n\n # Check if this part was uploaded before\n if finished_parts is not None and part_number in finished_parts:\n # Report this part finished\n part = finished_parts[part_number]\n large_file_upload_state.update_part_bytes(part_upload_source.get_content_length())\n\n # Return SHA1 hash\n return {'contentSha1': part.content_sha1}\n\n # Set up a progress listener\n part_progress_listener = PartProgressReporter(large_file_upload_state)\n\n # Retry the upload as needed\n exception_list = []\n with ExitStack() as stream_guard:\n part_stream = None\n\n def close_stream_callback(stream):\n if not stream.closed:\n stream.close()\n\n for _ in range(self.MAX_UPLOAD_ATTEMPTS):\n # if another part has already had an error there's no point in\n # uploading this part\n if large_file_upload_state.has_error():\n raise AlreadyFailed(large_file_upload_state.get_error_message())\n\n try:\n # reuse the stream in case of retry\n part_stream = part_stream or part_upload_source.open()\n # register stream closing callback only when reading is finally concluded\n stream_guard.callback(close_stream_callback, part_stream)\n\n content_length = part_upload_source.get_content_length()\n input_stream = ReadingStreamWithProgress(\n part_stream, part_progress_listener, length=content_length\n )\n if part_upload_source.is_sha1_known():\n content_sha1 = part_upload_source.get_content_sha1()\n else:\n input_stream = StreamWithHash(input_stream, stream_length=content_length)\n content_sha1 = HEX_DIGITS_AT_END\n # it is important that `len()` works on `input_stream`\n response = self.services.session.upload_part(\n file_id,\n part_number,\n len(input_stream),\n content_sha1,\n input_stream,\n server_side_encryption=encryption, # todo: client side encryption\n )\n if content_sha1 == HEX_DIGITS_AT_END:\n content_sha1 = input_stream.hash\n assert content_sha1 == response['contentSha1']\n return response\n except B2Error as e:\n if not e.should_retry_upload():\n raise\n exception_list.append(e)\n self.account_info.clear_bucket_upload_data(bucket_id)\n\n large_file_upload_state.set_error(str(exception_list[-1]))\n raise MaxRetriesExceeded(self.MAX_UPLOAD_ATTEMPTS, exception_list)",
"def choose_part_ranges(content_length, minimum_part_size):\n\n # If the file is at least twice the minimum part size, we are guaranteed\n # to be able to break it into multiple parts that are all at least\n # the minimum part size.\n assert minimum_part_size * 2 <= content_length\n\n # How many parts can we make?\n part_count = min(content_length // minimum_part_size, 10000)\n assert 2 <= part_count\n\n # All of the parts, except the last, are the same size. The\n # last one may be bigger.\n part_size = content_length // part_count\n last_part_size = content_length - (part_size * (part_count - 1))\n assert minimum_part_size <= last_part_size\n\n # Make all of the parts except the last\n parts = [(i * part_size, part_size) for i in range(part_count - 1)]\n\n # Add the last part\n start_of_last = (part_count - 1) * part_size\n last_part = (start_of_last, content_length - start_of_last)\n parts.append(last_part)\n\n return parts",
"def part_lister(mpupload, part_number_marker=None):\r\n more_results = True\r\n part = None\r\n while more_results:\r\n parts = mpupload.get_all_parts(None, part_number_marker)\r\n for part in parts:\r\n yield part\r\n part_number_marker = mpupload.next_part_number_marker\r\n more_results= mpupload.is_truncated",
"def _choose_boto3_chunksize(file_obj):\n file_obj_size = getattr(file_obj, \"_fsize\", None)\n\n if file_obj_size:\n allowed_chunk_sizes = [size * 1024 ** 2 for size in range(10, 110, 10)]\n\n for chunk_size in allowed_chunk_sizes:\n if math.ceil(file_obj_size / chunk_size) < 10000:\n break\n else:\n max_file_size = chunk_size * 10000\n uncompressed = \"uncompressed \" if isinstance(file_obj, FASTXInterleave) else \"\"\n\n raise OneCodexException(\n \"File is too large to upload ({}size: {}, max: {})\".format(\n uncompressed, file_obj_size, max_file_size\n )\n )\n\n multipart_chunksize = chunk_size\n else:\n # default to 25 mb\n multipart_chunksize = 25 * 1024 ** 2\n\n return multipart_chunksize",
"def chunk_split(cls, text):\n parts = []\n current = []\n for line in text.splitlines():\n size = sum(len(part) + 1 for part in current)\n extra = len(line)\n if size + extra >= 2000:\n if current:\n # The message is full, split here.\n parts.append(\"\\n\".join(current))\n current.clear()\n if extra >= 2000:\n # The line itself is too long, split on whitespace instead.\n *lines, line = wrap(line, 2000, expand_tabs=False, replace_whitespace=False)\n parts.extend(lines)\n current.append(line)\n if current:\n parts.append(\"\\n\".join(current))\n return parts",
"def _split_chunk(self, collection_name: str, key: int):\n def split_command():\n self._mongo_client.admin.command('split', collection_name, middle={SHARD_KEY: key})\n self._try_until_done(split_command)\n self._chunks[collection_name][key] = MAIN_MONGO_SHARD_NAME\n logging.info(f\"MongoAgent: Split chunk of {collection_name} at {key}\")",
"def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()"
]
| [
"0.7412221",
"0.66999257",
"0.6614869",
"0.63811994",
"0.6228369",
"0.6207673",
"0.6183003",
"0.6180115",
"0.61617076",
"0.6054617",
"0.60039365",
"0.59923476",
"0.59339154",
"0.59104306",
"0.5721449",
"0.55976",
"0.5578122",
"0.55743504",
"0.55683196",
"0.55657595",
"0.5533601",
"0.5522333",
"0.5519882",
"0.55059624",
"0.5462241",
"0.54581",
"0.54237354",
"0.54106206",
"0.5383166",
"0.5378792"
]
| 0.69681275 | 1 |
Out of bounds splitting multipart copy upload. Splits out of bounds copy upload parts into several ones to fit a memory-safe part size limit. Also takes into account the minimum upload part size. | def __init__(self, mpu, original_size, min_part_size, max_part_size):
super(OutOfBoundsSplittingMultipartCopyUpload, self).__init__(mpu, min_part_size, max_part_size)
self._original_size = original_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)",
"def __init__(self, mpu, min_part_size=5 * MB, max_part_size=5 * GB):\n super(SplittingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._min_part_size = min_part_size\n self._max_part_size = max_part_size",
"def __init__(self, mpu, length, min_part_number=1):\n super(TruncatingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._length = length\n self._min_part_number = min_part_number",
"def test_super_chunk(self):\n chunksize = MAX_SINGLE_UPLOAD_SIZE + 1\n size = MAX_SINGLE_UPLOAD_SIZE * 2\n self.assertEqual(find_chunksize(size, chunksize),\n MAX_SINGLE_UPLOAD_SIZE)",
"def __init__(self, mpu, original_size, chunk_size, download):\n super(AppendOptimizedCompositeMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._chunk_size = chunk_size\n self._download = download\n self._copy_parts = []\n self._first_chunk = sys.maxint\n self._first_chunk_offset = 0",
"def _buff_split(self, upload_buffer):\n if upload_buffer.intent_count() == 0:\n return\n tail_buffer = upload_buffer\n while True:\n if tail_buffer.length < self.recommended_upload_part_size + self.min_part_size:\n # `EmergePlanner_buff_partition` can split in such way that tail part\n # can be smaller than `min_part_size` - to avoid unnecessary download of possible\n # incoming copy intent, we don't split further\n yield tail_buffer\n return\n head_buff, tail_buffer = self._buff_partition(tail_buffer)\n yield head_buff",
"async def _multipart_upload_from_buffer(self):\n # check to see if bucket needs to be created\n if self._create_bucket:\n # check whether the bucket exists\n bucket_list = await self._get_bucket_list()\n if not self._bucket in bucket_list:\n await self._conn_obj.conn.create_bucket(Bucket=self._bucket)\n\n # if the current part is 1 we have to create the multipart upload\n if self._current_part == 1:\n response = await self._conn_obj.conn.create_multipart_upload(\n Bucket = self._bucket,\n Key = self._path\n )\n self._upload_id = response['UploadId']\n # we need to keep a track of the multipart info\n self._multipart_info = {'Parts' : []}\n\n # upload from a buffer - do we need to split into more than one\n # multiparts?\n new_buffer = []\n for buffer_part in range(0, len(self._buffer)):\n # is the current part of the buffer larger than the maximum\n # upload size? split if it is\n data_buf = self._buffer[buffer_part]\n data_len = data_buf.tell()\n if data_len >= self._part_size:\n data_buf.seek(0)\n data_pos = 0\n # split the file up\n while data_pos < data_len:\n new_buffer.append(io.BytesIO())\n # copy the data - don't overstep the buffer\n if data_pos + self._part_size >= data_len:\n sub_data = data_buf.read(data_len-data_pos)\n else:\n sub_data = data_buf.read(\n self._part_size\n )\n new_buffer[-1].write(sub_data)\n # increment to next\n data_pos += self._part_size\n\n # free the old memory\n self._buffer[buffer_part].close()\n else:\n # copy the old buffer into a new one\n self._buffer[buffer_part].seek(0)\n new_buffer.append(io.BytesIO(self._buffer[buffer_part].read()))\n\n # close other buffers first\n for b in self._buffer:\n b.close()\n self._buffer = new_buffer\n\n tasks = []\n\n for buffer_part in range(0, len(self._buffer)):\n # seek in the BytesIO buffer to get to the beginning after the\n # writing\n self._buffer[buffer_part].seek(0)\n # upload here\n # schedule the uploads\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._conn_obj.conn.upload_part(\n Bucket=self._bucket,\n Key=self._path,\n UploadId=self._upload_id,\n PartNumber=self._current_part + buffer_part,\n Body=self._buffer[buffer_part]\n ))\n tasks.append(task)\n\n # await the completion of the uploads\n res = await asyncio.gather(*tasks)\n for buffer_part in range(0, len(self._buffer)):\n # insert into the multipart info list of dictionaries\n part = res[buffer_part]\n self._multipart_info['Parts'].append(\n {\n 'PartNumber' : self._current_part + buffer_part,\n 'ETag' : part['ETag']\n }\n )\n\n # add the total number of uploads to the current part\n self._current_part += len(self._buffer)\n\n # reset all the byte buffers and their positions\n for buffer_part in range(0, len(self._buffer)):\n self._buffer[buffer_part].close()\n self._buffer = [io.BytesIO()]\n self._seek_pos = 0",
"def __init__(self, mpu, original_size, download, chunk_size, min_chunk, max_chunk):\n super(ChunkedMultipartUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download\n self._chunk_size = chunk_size\n self._partial_chunks = {}\n self._min_chunk = min_chunk\n self._max_chunk = max_chunk",
"def do_part_copy(args):\r\n # Multiprocessing args lameness\r\n src_bucket_name, src_key_name, dest_bucket_name, mpu_id, part_num, start_pos, end_pos = args\r\n logger.debug(\"do_part_copy got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n dest_bucket = s3.lookup(dest_bucket_name)\r\n mpu = None\r\n for mp in dest_bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # make sure we have a valid key\r\n src_bucket = s3.lookup( src_bucket_name )\r\n src_key = src_bucket.get_key( src_key_name )\r\n # Do the copy\r\n t1 = time.time()\r\n mpu.copy_part_from_key(src_bucket_name, src_key_name, part_num, start_pos, end_pos)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = (end_pos - start_pos)/1024./1024.\r\n logger.info(\"Copied part %s (%0.2fM) in %0.2fs at %0.2fMbps\" % (part_num, s, t2, s/t2))",
"def _get_copy_parts(self, copy_intent, start_offset, end_offset):\n fragment_length = end_offset - start_offset\n part_count = int(fragment_length / self.max_part_size)\n last_part_length = fragment_length % self.max_part_size\n if last_part_length == 0:\n last_part_length = self.max_part_size\n else:\n part_count += 1\n\n if part_count == 1:\n part_sizes = [last_part_length]\n else:\n if last_part_length < int(fragment_length / (part_count + 1)):\n part_count += 1\n base_part_size = int(fragment_length / part_count)\n size_remainder = fragment_length % part_count\n part_sizes = [\n base_part_size + (1 if i < size_remainder else 0) for i in range(part_count)\n ]\n\n copy_source = copy_intent.outbound_source\n relative_offset = start_offset - copy_intent.destination_offset\n for part_size in part_sizes:\n yield EmergePart(CopyEmergePartDefinition(copy_source, relative_offset, part_size))\n relative_offset += part_size",
"def upload_chunked(self, chunk_size = 4 * 1024 * 1024):\n\n while self.offset < self.target_length:\n next_chunk_size = min(chunk_size, self.target_length - self.offset)\n if self.last_block == None:\n self.last_block = self.file_obj.read(next_chunk_size)\n\n try:\n (self.offset, self.upload_id) = self.client.upload_chunk(\n StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)\n self.last_block = None\n except ErrorResponse as e:\n reply = e.body\n if \"offset\" in reply and reply['offset'] != 0:\n if reply['offset'] > self.offset:\n self.last_block = None\n self.offset = reply['offset']",
"def testUploadWrapperPartialTransfer(self):\n # Check that small reads still work.\n encrypted_data = \"\"\n count = 0\n while 1:\n small_read = self.encrypt_wrapper.read(2)\n if not small_read:\n break\n encrypted_data += small_read\n count += len(small_read)\n\n # Exit this loop sooner than it needs to.\n if count == 6000:\n break\n\n self.decrypt_wrapper.write(small_read)\n\n # This should raise a HMAC error because the tranfer is too short.\n with self.assertRaisesRegexp(IOError, \"Partial Message Received\"):\n self.decrypt_wrapper.close()\n\n # But the data sent up until the corruption is still saved. At least 4\n # chunks.\n self.assertTrue(len(self.outfd.getvalue()) >= 4096)",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def do_part_upload(args):\r\n # Multiprocessing args lameness\r\n bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries = args\r\n logger.debug(\"do_part_upload got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n s3.is_secure = secure\r\n bucket = s3.lookup(bucket_name)\r\n mpu = None\r\n for mp in bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # Read the chunk from the file\r\n fp = open(fname, 'rb')\r\n fp.seek(start)\r\n data = fp.read(size)\r\n fp.close()\r\n if not data:\r\n raise Exception(\"Unexpectedly tried to read an empty chunk\")\r\n\r\n def progress(x,y):\r\n logger.debug(\"Part %d: %0.2f%%\" % (i+1, 100.*x/y))\r\n\r\n try:\r\n # Do the upload\r\n t1 = time.time()\r\n mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = len(data)/1024./1024.\r\n logger.info(\"Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps\" % (i+1, s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries)",
"def test_small_invalid_separate_small_files(self):\n\n invalid = 'invalid uuid'\n\n index = GDCIndexClient(uri=base_url)\n bigs, smalls = index.separate_small_files(\n [invalid],\n HTTP_CHUNK_SIZE)\n\n assert index.get_access(invalid) == None\n assert index.get_filesize(invalid) == None\n assert index.get_md5sum(invalid) == None\n assert index.get_related_files(invalid) == []\n assert index.get_annotations(invalid) == []\n\n assert bigs == [invalid]\n assert smalls == []",
"def chunkFileUpload(self, fp, chunksize=1024 * 4096):\n parts = int(math.ceil(fp.stat().st_size / float(chunksize)))\n err = False\n maxchunksize = 1024 * 1024 * 100\n if chunksize >= maxchunksize:\n print(\n 'not uploaded: defined chunksize {0} is bigger than the allowed maximum {1}'.format(chunksize, maxchunksize))\n return None\n\n part = 0\n for part, chunk in enumerate(self.chunkedread(fp, chunksize),1):\n logger.info('({2})uploading part {0} of {1}'.format(part, parts, fp.name))\n files = {'file': (str(fp.name), chunk)}\n res = self._post(self.fullUrl('/chunked_upload?chunk={0}').format(part), files=files)\n\n print('finish, uploaded part {0} of {1} '.format(part, parts))\n res = self._post(self.fullUrl('chunked_upload/commit?filename={0}'.format(fp.name)))\n return self.getFile(res['file']['selfUrl']), self.getObject(res['relatedObject']['selfUrl'])\n\n # relObj = res['relatedObject']\n # obj = self.getObject(relObj['selfUrl'])\n # return obj",
"def _buff_partition(self, upload_buffer):\n left_buff = UploadBuffer(upload_buffer.start_offset)\n buff_start = upload_buffer.start_offset\n for idx, (intent, fragment_end) in enumerate(upload_buffer.iter_items()):\n candidate_size = fragment_end - buff_start\n if candidate_size > self.recommended_upload_part_size:\n right_fragment_size = candidate_size - self.recommended_upload_part_size\n left_buff.append(intent, fragment_end - right_fragment_size)\n return left_buff, upload_buffer.get_slice(\n start_idx=idx, start_offset=left_buff.end_offset\n )\n else:\n left_buff.append(intent, fragment_end)\n if candidate_size == self.recommended_upload_part_size:\n return left_buff, upload_buffer.get_slice(start_idx=idx + 1)\n\n return left_buff, UploadBuffer(left_buff.end_offset)",
"def _upload_part(\n self,\n bucket_id,\n file_id,\n part_upload_source: _TypeUploadSource,\n part_number,\n large_file_upload_state,\n finished_parts,\n encryption: EncryptionSetting,\n ):\n\n # b2_upload_part doesn't need SSE-B2. Large file encryption is decided on b2_start_large_file.\n if encryption is not None and encryption.mode == EncryptionMode.SSE_B2:\n encryption = None\n\n # Check if this part was uploaded before\n if finished_parts is not None and part_number in finished_parts:\n # Report this part finished\n part = finished_parts[part_number]\n large_file_upload_state.update_part_bytes(part_upload_source.get_content_length())\n\n # Return SHA1 hash\n return {'contentSha1': part.content_sha1}\n\n # Set up a progress listener\n part_progress_listener = PartProgressReporter(large_file_upload_state)\n\n # Retry the upload as needed\n exception_list = []\n with ExitStack() as stream_guard:\n part_stream = None\n\n def close_stream_callback(stream):\n if not stream.closed:\n stream.close()\n\n for _ in range(self.MAX_UPLOAD_ATTEMPTS):\n # if another part has already had an error there's no point in\n # uploading this part\n if large_file_upload_state.has_error():\n raise AlreadyFailed(large_file_upload_state.get_error_message())\n\n try:\n # reuse the stream in case of retry\n part_stream = part_stream or part_upload_source.open()\n # register stream closing callback only when reading is finally concluded\n stream_guard.callback(close_stream_callback, part_stream)\n\n content_length = part_upload_source.get_content_length()\n input_stream = ReadingStreamWithProgress(\n part_stream, part_progress_listener, length=content_length\n )\n if part_upload_source.is_sha1_known():\n content_sha1 = part_upload_source.get_content_sha1()\n else:\n input_stream = StreamWithHash(input_stream, stream_length=content_length)\n content_sha1 = HEX_DIGITS_AT_END\n # it is important that `len()` works on `input_stream`\n response = self.services.session.upload_part(\n file_id,\n part_number,\n len(input_stream),\n content_sha1,\n input_stream,\n server_side_encryption=encryption, # todo: client side encryption\n )\n if content_sha1 == HEX_DIGITS_AT_END:\n content_sha1 = input_stream.hash\n assert content_sha1 == response['contentSha1']\n return response\n except B2Error as e:\n if not e.should_retry_upload():\n raise\n exception_list.append(e)\n self.account_info.clear_bucket_upload_data(bucket_id)\n\n large_file_upload_state.set_error(str(exception_list[-1]))\n raise MaxRetriesExceeded(self.MAX_UPLOAD_ATTEMPTS, exception_list)",
"def __init__(self, mpu, original_size, download):\n super(OutOfBoundsFillingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download",
"def _choose_boto3_chunksize(file_obj):\n file_obj_size = getattr(file_obj, \"_fsize\", None)\n\n if file_obj_size:\n allowed_chunk_sizes = [size * 1024 ** 2 for size in range(10, 110, 10)]\n\n for chunk_size in allowed_chunk_sizes:\n if math.ceil(file_obj_size / chunk_size) < 10000:\n break\n else:\n max_file_size = chunk_size * 10000\n uncompressed = \"uncompressed \" if isinstance(file_obj, FASTXInterleave) else \"\"\n\n raise OneCodexException(\n \"File is too large to upload ({}size: {}, max: {})\".format(\n uncompressed, file_obj_size, max_file_size\n )\n )\n\n multipart_chunksize = chunk_size\n else:\n # default to 25 mb\n multipart_chunksize = 25 * 1024 ** 2\n\n return multipart_chunksize",
"def copy_in_chunks(self, chunk_size=None, throttle=None, start=None, limit=None):\n # On restart, foreign_keys exist, don't remake them\n self.create_triggers()\n\n self.chunk_size = chunk_size if chunk_size else self.db.config['DEFAULT_CHUNK_SIZE']\n throttle = throttle if throttle else self.db.config['DEFAULT_THROTTLE']\n\n if self.count == 0 or self.count != self.source.count:\n if not start:\n start = self.source.min_pk\n if not limit:\n limit = self.source.max_pk\n\n self.start_time = datetime.datetime.now()\n\n pointer = start\n if not (pointer and limit):\n pass\n else:\n while pointer < limit:\n self._copy_chunk(pointer)\n pointer = self._get_next_pk(pointer)\n self.log(start, pointer, limit)\n time.sleep(throttle)\n if pointer == limit:\n self._copy_chunk(pointer)\n self.log(start, pointer, limit)\n\n print('Copy complete! Adding referenced foreign keys')\n referenced_fks = [x for x in self.source.foreign_keys if x.referenced]\n self.add_foreign_keys(referenced_fks, override_table=self.name)\n return True",
"def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks",
"def _get_upload_part(self, upload_buffer):\n if upload_buffer.intent_count() == 1 and upload_buffer.get_intent(0).is_upload():\n intent = upload_buffer.get_intent(0)\n relative_offset = upload_buffer.start_offset - intent.destination_offset\n length = upload_buffer.length\n definition = UploadEmergePartDefinition(intent.outbound_source, relative_offset, length)\n else:\n subparts = []\n fragment_start = upload_buffer.start_offset\n for intent, fragment_end in upload_buffer.iter_items():\n relative_offset = fragment_start - intent.destination_offset\n length = fragment_end - fragment_start\n if intent.is_upload():\n subpart_class = LocalSourceUploadSubpart\n elif intent.is_copy():\n subpart_class = RemoteSourceUploadSubpart\n else:\n raise RuntimeError('This cannot happen!!!')\n subparts.append(subpart_class(intent.outbound_source, relative_offset, length))\n fragment_start = fragment_end\n definition = UploadSubpartsEmergePartDefinition(subparts)\n return EmergePart(definition)",
"def test_multipart_upload(self):\n\n syn = mock.Mock()\n md5_hex = \"ab123\"\n dest_file_name = \"foo\"\n content_type = \"text/plain\"\n storage_location_id = 3210\n result_file_handle_id = \"foo\"\n upload_side_effect = [\n mock.Mock(return_value={\"resultFileHandleId\": result_file_handle_id})\n ]\n\n # (file_size, in_part_size, in_max_threads, in_force_restart)\n # (out_max_threads, out_force_restart)\n tests = [\n # non-positive max threads corrected\n ((1234, DEFAULT_PART_SIZE, 0, False), (1, False)),\n # specify force_restart\n (\n (pow(2, 28), DEFAULT_PART_SIZE, 8, True),\n (8, True),\n ),\n # no max_threads, specified, should use default\n (\n (pow(2, 28), 1000, None, False),\n (pool_provider.DEFAULT_NUM_THREADS, False),\n ),\n # part size specified below min, should be raised\n (\n (1000, 1, 5, False),\n (5, False),\n ),\n # part size would exceed max number of parts,\n # should be adjusted accordingly\n (\n (pow(2, 36), MIN_PART_SIZE + 1, 8, True),\n (8, True),\n ),\n ]\n\n for (file_size, in_part_size, in_max_threads, in_force_restart), (\n out_max_threads,\n out_force_restart,\n ) in tests:\n upload_request = {\n \"concreteType\": \"org.sagebionetworks.repo.model.file.MultipartUploadRequest\",\n \"contentType\": content_type,\n \"contentMD5Hex\": md5_hex,\n \"fileName\": dest_file_name,\n \"fileSizeBytes\": file_size,\n \"generatePreview\": True,\n \"storageLocationId\": storage_location_id,\n \"partSizeBytes\": in_part_size,\n }\n\n result, upload_mock = self._multipart_upload_test(\n upload_side_effect,\n syn,\n dest_file_name,\n upload_request,\n mock.ANY,\n mock.ANY,\n max_threads=in_max_threads,\n force_restart=in_force_restart,\n )\n\n upload_mock.assert_called_once_with(\n syn,\n dest_file_name,\n upload_request,\n mock.ANY, # part_fn\n mock.ANY, # md5_fn,\n out_max_threads,\n out_force_restart,\n )",
"def choose_part_ranges(content_length, minimum_part_size):\n\n # If the file is at least twice the minimum part size, we are guaranteed\n # to be able to break it into multiple parts that are all at least\n # the minimum part size.\n assert minimum_part_size * 2 <= content_length\n\n # How many parts can we make?\n part_count = min(content_length // minimum_part_size, 10000)\n assert 2 <= part_count\n\n # All of the parts, except the last, are the same size. The\n # last one may be bigger.\n part_size = content_length // part_count\n last_part_size = content_length - (part_size * (part_count - 1))\n assert minimum_part_size <= last_part_size\n\n # Make all of the parts except the last\n parts = [(i * part_size, part_size) for i in range(part_count - 1)]\n\n # Add the last part\n start_of_last = (part_count - 1) * part_size\n last_part = (start_of_last, content_length - start_of_last)\n parts.append(last_part)\n\n return parts",
"def min_parts():\n # you must replace this with your own value\n return 1155",
"def min_parts():\n # you must replace this with your own value\n return 1155",
"def upload_chunk(self, request, **kwargs):\n import uuid\n\n self.method_check(request, allowed=[\"post\"])\n self.is_authenticated(request)\n\n if not self.check_dfo(request, kwargs[\"dfo_id\"]):\n return self.handle_error(\"Invalid object or access denied.\")\n\n checksum = request.headers.get(\"Checksum\", None)\n if checksum is None:\n checksum = request.META.get(\"Checksum\", None)\n if checksum is None:\n return self.handle_error(\"Missing 'Checksum' in header.\")\n\n content_range = request.headers.get(\"Content-Range\", None)\n if content_range is None:\n content_range = request.META.get(\"Content-Range\", None)\n if content_range is None:\n return self.handle_error(\"Missing 'Content-Range' in header.\")\n\n m = re.search(r\"^(\\d+)\\-(\\d+)\\/(\\d+)$\", content_range).groups()\n content_start = int(m[0])\n content_end = int(m[1])\n content_length = content_end-content_start\n if content_length > settings.CHUNK_MAX_SIZE:\n return self.handle_error(\"Chunk size is larger than max allowed.\")\n\n check = Chunk.objects.filter(\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start\n )\n if len(check) != 0:\n return self.handle_error(\"Chunk already uploaded.\")\n\n content_checksum = calc_checksum(settings.CHUNK_CHECKSUM, request.body)\n if content_checksum is None or content_checksum != checksum:\n return self.handle_error(\n \"Checksum does not match. {}:{}\".format(settings.CHUNK_CHECKSUM, content_checksum))\n\n if not os.path.exists(settings.CHUNK_STORAGE):\n try:\n os.mkdir(settings.CHUNK_STORAGE)\n except Exception as e:\n return self.handle_error(str(e))\n\n data_path = os.path.join(settings.CHUNK_STORAGE, kwargs[\"dfo_id\"])\n if not os.path.exists(data_path):\n try:\n os.makedirs(data_path, mode=0o770, exist_ok=True)\n os.chmod(data_path, 0o770)\n except Exception as e:\n return self.handle_error(str(e))\n\n chunk_id = str(uuid.uuid4())\n file_path = os.path.join(data_path, chunk_id)\n\n try:\n file = open(file_path, \"wb\")\n file.write(request.body)\n file.close()\n except Exception as e:\n return self.handle_error(str(e))\n\n dfo = DataFileObject.objects.get(id=kwargs[\"dfo_id\"])\n\n instrument = dfo.datafile.dataset.instrument\n if instrument is not None:\n instrument_id = instrument.id\n else:\n instrument_id = None\n\n try:\n chunk = Chunk.objects.create(\n chunk_id=chunk_id,\n dfo_id=kwargs[\"dfo_id\"],\n offset=content_start,\n size=content_length,\n instrument_id=instrument_id,\n user_id=request.user.id\n )\n except Exception as e:\n try:\n os.remove(file_path)\n except Exception as e:\n pass\n return self.handle_error(str(e))\n\n data = {\n \"success\": True,\n \"id\": chunk.id\n }\n\n return JsonResponse(data, status=200)",
"def multipart_push(self, upload_id, url, part_number, chunk_size, data, md5=None):\n path = self.base_path / url\n assert path.is_file(), f\"{self}: multipart upload file {path} does not exist.\"\n with path.open(\"r+b\") as stream:\n stream.seek((part_number - 1) * chunk_size)\n shutil.copyfileobj(data, stream, 1024 * 1024)\n return dict()",
"def part_lister(mpupload, part_number_marker=None):\r\n more_results = True\r\n part = None\r\n while more_results:\r\n parts = mpupload.get_all_parts(None, part_number_marker)\r\n for part in parts:\r\n yield part\r\n part_number_marker = mpupload.next_part_number_marker\r\n more_results= mpupload.is_truncated"
]
| [
"0.6936495",
"0.6747881",
"0.6268513",
"0.6234417",
"0.614132",
"0.6026663",
"0.600616",
"0.5888342",
"0.5825971",
"0.5810179",
"0.5752304",
"0.565045",
"0.56290716",
"0.5486159",
"0.54644954",
"0.5408568",
"0.54034483",
"0.5385147",
"0.5360366",
"0.53489244",
"0.5302736",
"0.52885604",
"0.528637",
"0.5211331",
"0.5205921",
"0.5203798",
"0.5203798",
"0.5193078",
"0.51820195",
"0.5157831"
]
| 0.6998303 | 0 |
Append-optimized composite multipart copy upload. Uses the original object as a pre-uploaded composite part in the case of append writes. To do so, it adjusts the first of the already uploaded chunks. Uploads copy parts as regular parts using the content of the original file. | def __init__(self, mpu, original_size, chunk_size, download):
super(AppendOptimizedCompositeMultipartCopyUpload, self).__init__(mpu)
self._mpu = mpu
self._original_size = original_size
self._chunk_size = chunk_size
self._download = download
self._copy_parts = []
self._first_chunk = sys.maxint
self._first_chunk_offset = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _multipart_upload_from_buffer(self):\n # check to see if bucket needs to be created\n if self._create_bucket:\n # check whether the bucket exists\n bucket_list = await self._get_bucket_list()\n if not self._bucket in bucket_list:\n await self._conn_obj.conn.create_bucket(Bucket=self._bucket)\n\n # if the current part is 1 we have to create the multipart upload\n if self._current_part == 1:\n response = await self._conn_obj.conn.create_multipart_upload(\n Bucket = self._bucket,\n Key = self._path\n )\n self._upload_id = response['UploadId']\n # we need to keep a track of the multipart info\n self._multipart_info = {'Parts' : []}\n\n # upload from a buffer - do we need to split into more than one\n # multiparts?\n new_buffer = []\n for buffer_part in range(0, len(self._buffer)):\n # is the current part of the buffer larger than the maximum\n # upload size? split if it is\n data_buf = self._buffer[buffer_part]\n data_len = data_buf.tell()\n if data_len >= self._part_size:\n data_buf.seek(0)\n data_pos = 0\n # split the file up\n while data_pos < data_len:\n new_buffer.append(io.BytesIO())\n # copy the data - don't overstep the buffer\n if data_pos + self._part_size >= data_len:\n sub_data = data_buf.read(data_len-data_pos)\n else:\n sub_data = data_buf.read(\n self._part_size\n )\n new_buffer[-1].write(sub_data)\n # increment to next\n data_pos += self._part_size\n\n # free the old memory\n self._buffer[buffer_part].close()\n else:\n # copy the old buffer into a new one\n self._buffer[buffer_part].seek(0)\n new_buffer.append(io.BytesIO(self._buffer[buffer_part].read()))\n\n # close other buffers first\n for b in self._buffer:\n b.close()\n self._buffer = new_buffer\n\n tasks = []\n\n for buffer_part in range(0, len(self._buffer)):\n # seek in the BytesIO buffer to get to the beginning after the\n # writing\n self._buffer[buffer_part].seek(0)\n # upload here\n # schedule the uploads\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._conn_obj.conn.upload_part(\n Bucket=self._bucket,\n Key=self._path,\n UploadId=self._upload_id,\n PartNumber=self._current_part + buffer_part,\n Body=self._buffer[buffer_part]\n ))\n tasks.append(task)\n\n # await the completion of the uploads\n res = await asyncio.gather(*tasks)\n for buffer_part in range(0, len(self._buffer)):\n # insert into the multipart info list of dictionaries\n part = res[buffer_part]\n self._multipart_info['Parts'].append(\n {\n 'PartNumber' : self._current_part + buffer_part,\n 'ETag' : part['ETag']\n }\n )\n\n # add the total number of uploads to the current part\n self._current_part += len(self._buffer)\n\n # reset all the byte buffers and their positions\n for buffer_part in range(0, len(self._buffer)):\n self._buffer[buffer_part].close()\n self._buffer = [io.BytesIO()]\n self._seek_pos = 0",
"def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)",
"def _concatenate_parts_to_file_for_pipe(self,\n outfile,\n image_parts,\n source_dir,\n debug=False):\n close_all_fds([outfile])\n part_count = len(image_parts)\n part_file = None\n try:\n for part in image_parts:\n self.log.debug(\"Concatenating Part:\" + str(part.filename))\n sha1sum = hashlib.sha1()\n part_file_path = source_dir + \"/\" + part.filename\n with open(part_file_path) as part_file:\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n while data:\n sha1sum.update(data)\n outfile.write(data)\n outfile.flush()\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n part_digest = sha1sum.hexdigest()\n self.log.debug(\n \"PART NUMBER:\" + str(image_parts.index(part) + 1) +\n \"/\" + str(part_count))\n self.log.debug('Part sha1sum:' + str(part_digest))\n self.log.debug('Expected sum:' + str(part.hexdigest))\n if part_digest != part.hexdigest:\n raise ValueError('Input part file may be corrupt:{0} '\n .format(part.filename),\n '(expected digest: {0}, actual: {1})'\n .format(part.hexdigest, part_digest))\n except IOError as ioe:\n # HACK\n self.log.debug('Error in _concatenate_parts_to_file_for_pipe.' +\n str(ioe))\n if not debug:\n return\n raise ioe\n finally:\n if part_file:\n part_file.close()\n self.log.debug('Concatentate done')\n self.log.debug('Closing write end of pipe after writing')\n outfile.close()",
"def do_part_copy(args):\r\n # Multiprocessing args lameness\r\n src_bucket_name, src_key_name, dest_bucket_name, mpu_id, part_num, start_pos, end_pos = args\r\n logger.debug(\"do_part_copy got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n dest_bucket = s3.lookup(dest_bucket_name)\r\n mpu = None\r\n for mp in dest_bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # make sure we have a valid key\r\n src_bucket = s3.lookup( src_bucket_name )\r\n src_key = src_bucket.get_key( src_key_name )\r\n # Do the copy\r\n t1 = time.time()\r\n mpu.copy_part_from_key(src_bucket_name, src_key_name, part_num, start_pos, end_pos)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = (end_pos - start_pos)/1024./1024.\r\n logger.info(\"Copied part %s (%0.2fM) in %0.2fs at %0.2fMbps\" % (part_num, s, t2, s/t2))",
"def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()",
"def upload_part(self, seq, offset, chunk_size, labels, buffer = ''):\n # TODO implement Content-MD5\n debug(\"Uploading part %i of %r (%s bytes)\" % (seq, self.upload_id, chunk_size))\n headers = { \"content-length\": chunk_size }\n query_string = \"?partNumber=%i&uploadId=%s\" % (seq, self.upload_id)\n request = self.s3.create_request(\"OBJECT_PUT\", uri = self.uri, headers = headers, extra = query_string)\n response = self.s3.send_file(request, self.file, labels, buffer, offset = offset, chunk_size = chunk_size)\n self.parts[seq] = response[\"headers\"][\"etag\"]\n return response",
"def __init__(self, mpu, original_size, min_part_size, max_part_size):\n super(OutOfBoundsSplittingMultipartCopyUpload, self).__init__(mpu, min_part_size, max_part_size)\n self._original_size = original_size",
"def multipart_push(self, upload_id, url, part_number, chunk_size, data, md5=None):\n path = self.base_path / url\n assert path.is_file(), f\"{self}: multipart upload file {path} does not exist.\"\n with path.open(\"r+b\") as stream:\n stream.seek((part_number - 1) * chunk_size)\n shutil.copyfileobj(data, stream, 1024 * 1024)\n return dict()",
"def multipart(self):\n self.add_file_string('Multipart file')\n self.should_copy = False",
"def _upload_chunk(self, final=False):\n out = self.fs.session.post(\n self.location,\n data=self.buffer.getvalue(),\n headers={\"content-type\": \"application/octet-stream\"},\n )\n out.raise_for_status()\n return True",
"def _copy_chunk(self, last_pk):\n self.execute(self.commands.copy_chunk(\n self.name,\n self._join_cols(self.intersection.dest_columns),\n self._qualify(self.source.name, self.intersection.origin_columns),\n self.source.name,\n self.primary_key_column,\n last_pk,\n self.chunk_size\n ))\n self.commit()",
"def upload_part_copy(Bucket=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, CopySourceRange=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None):\n pass",
"def __init__(self, mpu, min_part_size=5 * MB, max_part_size=5 * GB):\n super(SplittingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._min_part_size = min_part_size\n self._max_part_size = max_part_size",
"def _multipart_upload(self, credentials, src_file_path, artifact_file_path):\n try:\n headers = self._extract_headers_from_credentials(credentials.headers)\n # try to create the file\n self._retryable_adls_function(\n func=put_adls_file_creation,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n headers=headers,\n )\n # next try to append the file\n futures = {}\n file_size = os.path.getsize(src_file_path)\n num_chunks = _compute_num_chunks(src_file_path, _MULTIPART_UPLOAD_CHUNK_SIZE)\n use_single_part_upload = num_chunks == 1\n for index in range(num_chunks):\n start_byte = index * _MULTIPART_UPLOAD_CHUNK_SIZE\n future = self.chunk_thread_pool.submit(\n self._retryable_adls_function,\n func=patch_adls_file_upload,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n local_file=src_file_path,\n start_byte=start_byte,\n size=_MULTIPART_UPLOAD_CHUNK_SIZE,\n position=start_byte,\n headers=headers,\n is_single=use_single_part_upload,\n )\n futures[future] = index\n\n _, errors = _complete_futures(futures, src_file_path)\n if errors:\n raise MlflowException(\n f\"Failed to upload at least one part of {artifact_file_path}. Errors: {errors}\"\n )\n\n # finally try to flush the file\n if not use_single_part_upload:\n self._retryable_adls_function(\n func=patch_adls_flush,\n artifact_file_path=artifact_file_path,\n sas_url=credentials.signed_uri,\n position=file_size,\n headers=headers,\n )\n except Exception as err:\n raise MlflowException(err)",
"def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if isinstance(remote, RemoteFile) and self.hash() == remote.hash:\n # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.path, remote.uri))\n return\n if not overwrite:\n raise ValueError(\"%s exists\" % remote.uri)\n\n # Uploading can either happen all at once (with a 150 MB limit),\n # or in chunks. If the file is smaller than the selected chunk size,\n # then try to upload in one go.\n chunksize = min(pdbox._args.get(\"chunksize\", 149.0), 149.0)\n pdbox.debug(\"Chunk size: %.2f MB\" % chunksize)\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return None\n\n # Set the write mode.\n if overwrite:\n mode = dropbox.files.WriteMode.overwrite\n else:\n mode = dropbox.files.WriteMode.add\n\n chunk = int(chunksize * 1024 * 1024) # Convert B to MB.\n\n with open(self.path, \"rb\") as f:\n data = f.read()\n sz = len(data)\n\n # TODO: Progress bars.\n if sz < chunk: # One-shot upload.\n meta = execute(pdbox.dbx.files_upload, data, dest, mode)\n else: # Multipart upload.\n nchunks = math.ceil(sz / chunk)\n # Initiate the upload with just the first byte.\n start = execute(pdbox.dbx.files_upload_session_start, f[0])\n cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)\n\n # Now just add each chunk.\n while sz - cursor.offset > chunk:\n pdbox.debug(\n \"Uploading chunk %d/%d\" % (cursor.offset % chunk, nchunks),\n )\n execute(\n pdbox.dbx.files_upload_session_append_v2,\n data[cursor.offset:cursor.offset + chunk],\n cursor,\n )\n cursor.offset += chunk\n\n # Upload the remaining to finish the transaction.\n meta = execute(\n pdbox.dbx.files_upload_session_finish,\n data[cursor.offset:],\n dropbox.files.CommitInfo(dest, mode),\n )\n\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return RemoteFile(None, meta=meta)",
"def complete_multipart_upload(self):\n debug(\"MultiPart: Completing upload: %s\" % self.upload_id)\n\n parts_xml = []\n part_xml = \"<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>\"\n for seq, etag in self.parts.items():\n parts_xml.append(part_xml % (seq, etag))\n body = \"<CompleteMultipartUpload>%s</CompleteMultipartUpload>\" % (\"\".join(parts_xml))\n\n headers = { \"content-length\": len(body) }\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = headers, extra = \"?uploadId=%s\" % (self.upload_id))\n response = self.s3.send_request(request, body = body)\n\n return response",
"def putchunk(self, *args, **kwargs):\n return _image.image_putchunk(self, *args, **kwargs)",
"def __init__(self, mpu, original_size, download, chunk_size, min_chunk, max_chunk):\n super(ChunkedMultipartUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download\n self._chunk_size = chunk_size\n self._partial_chunks = {}\n self._min_chunk = min_chunk\n self._max_chunk = max_chunk",
"def append(self, file, idx):\n\n # print \"append %s %d\" % (file, idx)\n src = \"%s/%s\" % (self._dir, file)\n dst = \"%s/.%d.new\" % (self._tempdir, idx)\n copyfile(src, dst)\n result = self._run(\"%s --%d --block-size %d --bits %d --quiet --threads %d %s --mode %s --rehash %s %s\" %\n (self._ishakesumd, self._mode, self._block_size, self._output_bits, self._threads,\n self._profile, self._alg, self._hash, self._tempdir))\n os.remove(dst)\n return result",
"def do_part_upload(args):\r\n # Multiprocessing args lameness\r\n bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries = args\r\n logger.debug(\"do_part_upload got args: %s\" % (args,))\r\n\r\n # Connect to S3, get the MultiPartUpload\r\n s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n s3.is_secure = secure\r\n bucket = s3.lookup(bucket_name)\r\n mpu = None\r\n for mp in bucket.list_multipart_uploads():\r\n if mp.id == mpu_id:\r\n mpu = mp\r\n break\r\n if mpu is None:\r\n raise Exception(\"Could not find MultiPartUpload %s\" % mpu_id)\r\n\r\n # Read the chunk from the file\r\n fp = open(fname, 'rb')\r\n fp.seek(start)\r\n data = fp.read(size)\r\n fp.close()\r\n if not data:\r\n raise Exception(\"Unexpectedly tried to read an empty chunk\")\r\n\r\n def progress(x,y):\r\n logger.debug(\"Part %d: %0.2f%%\" % (i+1, 100.*x/y))\r\n\r\n try:\r\n # Do the upload\r\n t1 = time.time()\r\n mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)\r\n\r\n # Print some timings\r\n t2 = time.time() - t1\r\n s = len(data)/1024./1024.\r\n logger.info(\"Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps\" % (i+1, s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries)",
"def _flush_write_buffer(self):\n if self._buffer_file_size:\n self._write_counter += 1\n self.file.seek(0)\n self._multipart.upload_part_from_file(\n self.file,\n self._write_counter,\n headers=self._storage.headers\n )\n self.file.close()\n self.file = None",
"def __init__(self, mpu, length, min_part_number=1):\n super(TruncatingMultipartCopyUpload, self).__init__(mpu)\n self._mpu = mpu\n self._length = length\n self._min_part_number = min_part_number",
"def upload_chunked(self, chunk_size = 4 * 1024 * 1024):\n\n while self.offset < self.target_length:\n next_chunk_size = min(chunk_size, self.target_length - self.offset)\n if self.last_block == None:\n self.last_block = self.file_obj.read(next_chunk_size)\n\n try:\n (self.offset, self.upload_id) = self.client.upload_chunk(\n StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)\n self.last_block = None\n except ErrorResponse as e:\n reply = e.body\n if \"offset\" in reply and reply['offset'] != 0:\n if reply['offset'] > self.offset:\n self.last_block = None\n self.offset = reply['offset']",
"def create_part_copy(self, object_name, offset, size, multipart_id, part_number):\n\n return h3lib.create_part_copy(self._handle, object_name, offset, size, multipart_id, part_number, self._user_id)",
"def _put_object(self, sha: str) -> None:\n data = git.encode_object(sha)\n path = self._object_path(sha)\n self._trace(\"writing: %s\" % path)\n retries = 0\n mode = dropbox.files.WriteMode.overwrite\n\n if len(data) <= CHUNK_SIZE:\n while True:\n try:\n self._connection.files_upload(data, path, mode, strict_conflict=True, mute=True)\n except dropbox.exceptions.InternalServerError:\n self._trace(\"internal server error writing %s, retrying\" % sha)\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise\n else:\n break\n else:\n cursor = dropbox.files.UploadSessionCursor(offset=0)\n done_uploading = False\n\n while not done_uploading:\n try:\n end = cursor.offset + CHUNK_SIZE\n chunk = data[(cursor.offset) : end]\n\n if cursor.offset == 0:\n # upload first chunk\n result = self._connection.files_upload_session_start(chunk)\n cursor.session_id = result.session_id\n elif end < len(data):\n # upload intermediate chunks\n self._connection.files_upload_session_append_v2(chunk, cursor)\n else:\n # upload the last chunk\n commit_info = dropbox.files.CommitInfo(\n path, mode, strict_conflict=True, mute=True\n )\n self._connection.files_upload_session_finish(chunk, cursor, commit_info)\n done_uploading = True\n\n # advance cursor to next chunk\n cursor.offset = end\n\n except dropbox.files.UploadSessionOffsetError as offset_error:\n self._trace(\"offset error writing %s, retrying\" % sha)\n cursor.offset = offset_error.correct_offset\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise\n except dropbox.exceptions.InternalServerError:\n self._trace(\"internal server error writing %s, retrying\" % sha)\n if retries < MAX_RETRIES:\n retries += 1\n else:\n raise",
"def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)",
"def put(self, item): \n if len(self.contents) < self.max_size:\n self.contents.append(item)\n elif len(self.contents) >= self.max_size:\n print \"Backpack Full.\"",
"def add_chunk(self, chunk):\n self.chunkbuffer.appendleft(chunk)",
"def test_appending(tmp_path):\n path = tmp_path / \"test_appending.hdf5\"\n\n c = ScalarField(UnitGrid([2]), data=1)\n storage = FileStorage(path)\n storage.start_writing(c)\n assert len(storage) == 0\n storage.append(c, 0)\n assert storage._file_state == \"writing\"\n assert len(storage) == 1\n storage.close()\n\n storage2 = FileStorage(path, write_mode=\"append\")\n storage2.start_writing(c)\n storage2.append(c, 1)\n storage2.close()\n\n assert len(storage2) == 2",
"def merge_vcf_chunks(out_dir, path_name, path_size, chunks, overwrite):\n vcf_path = os.path.join(out_dir, path_name + \".vcf\")\n if overwrite or not os.path.isfile(vcf_path):\n first = True\n for chunk_i, chunk in enumerate(chunks):\n clip_path = chunk_base_name(path_name, out_dir, chunk_i, \"_clip.vcf\")\n if os.path.isfile(clip_path):\n if first is True:\n # copy everything including the header\n run(\"cat {} > {}\".format(clip_path, vcf_path))\n first = False\n else:\n # add on everythin but header\n run(\"grep -v \\\"^#\\\" {} >> {}\".format(clip_path, vcf_path), check=False)\n \n # add a compressed indexed version\n if overwrite or not os.path.isfile(vcf_path + \".gz\"):\n run(\"bgzip -c {} > {}\".format(vcf_path, vcf_path + \".gz\"))\n run(\"tabix -f -p vcf {}\".format(vcf_path + \".gz\"))"
]
| [
"0.61642283",
"0.60305166",
"0.58840835",
"0.5873734",
"0.5802467",
"0.55642086",
"0.55241823",
"0.548686",
"0.5390297",
"0.536524",
"0.5341583",
"0.53328073",
"0.5317409",
"0.5303169",
"0.52485627",
"0.5222615",
"0.51945925",
"0.5164831",
"0.51564217",
"0.50854754",
"0.50838685",
"0.50625986",
"0.5045313",
"0.50329167",
"0.50211614",
"0.5021036",
"0.5009092",
"0.5007441",
"0.5007052",
"0.49802414"
]
| 0.6833404 | 0 |
Convert an input text file to an output Morse code file. Notes: This function assumes the existence of a MORSE_CODE dictionary, containing a mapping between English letters and their corresponding Morse code. | def english_to_morse(
input_file: str = "lorem.txt",
output_file: str = "lorem_morse.txt"
): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def English2Morse ():\r\n \r\n morse_word = ''\r\n morse_list = []\r\n why = ''\r\n excp2 = True\r\n excp3 = True\r\n global dictionary\r\n global Morse_dict\r\n \r\n ## This part is building the Table, which is an dictionary,\r\n # that shows for each letter (the key), the equivalent dot-dash string (the value)\r\n # if newM is True, that means that the user has changed the symbols of the morse code, so, we are going to use the new table\r\n # if it is False, that means the uer did not change the symbols, so we can use the table wiht dash and dots\r\n # also, if the variable dictionary is false, it means that the user have not used the English2Morse function yet, so,\r\n ## we need to create the dictionary, otherwise, if it is true, the user have already created the dictionary,\r\n ## so it is not necessary to create it again\r\n # but, if the user changed the morse symbol, it is going to create the new dictionary\r\n if newM:\r\n Morse_Table = open('new_MorseTable.txt','r')\r\n else:\r\n if not dictionary:\r\n try:\r\n Morse_Table = open('MorseTable.txt','r')\r\n except:\r\n shell_connect.write('MorseTable.txt not found. Download it to continue.\\n','COMMENT')\r\n excp3 = False\r\n if excp3:\r\n if not dictionary:\r\n for line in Morse_Table:\r\n Morse_dict.update({line[0]:line[2:-1]})\r\n Morse_Table.close()\r\n dictionary = True\r\n ## take the name of the file which has english letters to translate the text into morse code\r\n #try until the user enter a file that exists\r\n while True:\r\n try:\r\n filename = input(\"Please, write the name of the file (with extension) which has letters: \\n\")\r\n E2M_file = open(filename,'r')\r\n break\r\n except:\r\n shell_connect.write('File does not exist, try again.\\n','COMMENT')\r\n # creates the file which is going to have the message in morse code\r\n morse_hidden_file = open(filename[:-4]+'_hidden.txt','w')\r\n # for each line in the file, take each word. 
For each word, transform the letters into morse code\r\n # Invariant: line is always less than or equal to the number of lines in the english to morse file\r\n for line in E2M_file:\r\n # If the lines begins with a space, then write the spaces in the file being created and remove them from the line being read\r\n while line[0] == ' ':\r\n morse_hidden_file.write(' ')\r\n line = line[1:]\r\n words_list = line.split()\r\n if words_list != []:\r\n # if the user made a mistake and chose a file which is already in morse code, an error message is shown\r\n if words_list[0][0] == var1 or words_list[0][0] == var2 or words_list[0][0] == '*':\r\n excp = False\r\n excpwhy = 'The file is not a letter file'\r\n shell_connect.write('\\nThis is not a letter file!\\n','COMMENT')\r\n break\r\n else:\r\n # for each letter in each word, print the correspondent morse code in the new file\r\n # Invariant: word is always less than or equal to number of words in words_list\r\n for word in words_list:\r\n # Invariant: letter is always less than or equal to the number of letters in word\r\n for letter in word:\r\n # if the letter is uppercase, add a '*' symbol in front of the letter and transform the lowercase of that letter into morse code\r\n try:\r\n if not letter.isupper():\r\n #transform the letter into morse code\r\n morse_word = morse_word + Morse_dict[letter] + ' '\r\n else:\r\n letter = letter.lower()\r\n morse_word = morse_word +'*'+ Morse_dict[letter] + ' '\r\n except:\r\n ##letter not found\r\n excp2 = False\r\n why = letter + ', '\r\n excpwhy = 'warning: {} not found and not printed'.format(why)\r\n morse_hidden_file.write(morse_word)\r\n morse_word = ''\r\n morse_hidden_file.write(' ')\r\n excp = True\r\n morse_hidden_file.write('\\n')\r\n else:\r\n morse_hidden_file.write('\\n')\r\n E2M_file.close()\r\n morse_hidden_file.close()\r\n # if excp is True, that means all went right, if it is False, that means that something went wrong\r\n if excp:\r\n if excp2:\r\n shell_connect.write('\\nEnglish to Morse sucessful!','STRING')\r\n else:\r\n shell_connect.write('\\nEnglish to Morse partially sucessful, some symbols not found were ignored.','KEYWORD')\r\n print('\\nThe file {} was created.'.format(filename[:-4]+'_hidden.txt'))\r\n else:\r\n shell_connect.write('\\nEnglish to Morse failed. '+excpwhy+'.\\n','COMMENT')",
"def morse_text(self, code):\n self.output.clear()\n text = code.split()\n for item in text:\n for keys, values in morse.items():\n if item == values:\n self.output.append(keys)\n return \"\".join(self.output)",
"def text_morse(self, code):\n self.output.clear()\n text = list(code)\n for item in text:\n for data in morse:\n if item == data:\n self.output.append(morse[data])\n return \" \".join(self.output)",
"def binary_Morse_to_text(text,morse_dict=AlfabetMorsa):\n\n word = \"\"\n for dl in range(0, len(text)):\n # print(tekst[dl])\n for key in morse_dict.keys():\n if morse_dict[key] == text[dl]:\n word = word + str(key)\n break\n return word",
"def test_string_to_morse():\n test_list = [\n (\"Sofia\", \"... --- ..-. .. .- \"),\n (\"We the people\",\n \".-- . \" +\n \" - .... . \" +\n \" .--. . --- .--. .-.. . \"),\n (\"SOPHIA\", \"... --- .--. .... .. .- \"),\n (\"EUGENIA\", \". ..- --. . -. .. .- \"),\n ]\n\n for txt, code in test_list:\n\n print 'encode: ', txt\n encoded = encode(txt)\n print encoded\n print len(code), len(encoded)\n assert code == encoded\n\n print 'decode: ', code\n decoded = decode(code)\n print decoded\n assert decoded == txt.upper()",
"def encode_morse(plaintext):\r\n if not isinstance(plaintext, str):\r\n return \"Plaintext is not a string!\"\r\n # Having confirmed it's a string, we convert it to uppercase - this will leave numbers and special characters untouched\r\n if not plaintext.isupper():\r\n plaintext_copy = plaintext.upper() #We don't want to mutate the input\r\n else:\r\n plaintext_copy = str(plaintext)\r\n plaintext_copy = whitespace_sorter(plaintext_copy) \r\n ciphertext = \"\" #This also has the effect of returning an empty string if an empty string is the input\r\n #We then do the actual translation by simply looking up the dictionary value\r\n for character in plaintext_copy:\r\n if character not in plaintext_characters:\r\n return \"ERROR: You can't encode the following character: \" + character\r\n ciphertext += morse_dict[character]\r\n return ciphertext[:-1] #Remove trailing /s\r",
"def decode_morse(ciphertext):\r\n if not isinstance(ciphertext, str):\r\n return \"Ciphertext is not a string!\"\r\n ciphertext_copy = str(ciphertext)\r\n if len(ciphertext) == 0: #Accounts for empty string\r\n return \"\"\r\n if ciphertext_copy[-1] != \" \":\r\n ciphertext_copy += \" \" #Accounts for user variation in final trailing whitespace - we need this final whitespace for the dictionary to work\r\n #This also has the effect of returning nonsense characters we can't decode later on\r\n plaintext = \"\" #Empty string solution\r\n morse_char = \"\" #This variable will hold each letter/character's Morse code\r\n for character in ciphertext_copy:\r\n if character == \" \": #Spaces are letter delimiters\r\n morse_char += character\r\n if morse_char in ciphertext_characters:\r\n plaintext += plaintext_characters[ciphertext_characters.index(morse_char)]\r\n morse_char = \"\" #Reset the holding variable\r\n else:\r\n return \"ERROR: I can't decode the following character: \" + morse_char + \"\\nYour decoded message thus far is: \" + whitespace_sorter(plaintext)\r\n #The nature of this return statement allows tests via assertion, but will also respond to print statements accordingly.\r\n else:\r\n morse_char += character #If it's not a letter delimiter, continue building the letter/character Morse code\r\n plaintext = whitespace_sorter(plaintext)\r\n return plaintext",
"def morseDecode(inputStringList):\r\n\treturn ''.join(MORSETRANSLATION.get(i.upper()) for i in inputStringList)",
"def _create_morse_code_audio(self, text):\n # The Morse-sender-dictionary letter keys are lower-case letters.\n lctext = text.lower()\n # Replace any newline characters with a space character.\n lctext = lctext.replace('\\n', ' ')\n # Loop and convert characters to Morse code audio.\n # All characters that are not in the Morse-sender-dictionary\n # and are not either a space or a tab character are discarded.\n silence_count = 0\n for c in lctext:\n if c in MorseCodeSender.MORSE_SENDER_DICT:\n code = MorseCodeSender.MORSE_SENDER_DICT[c]\n for dotdash in code:\n if dotdash == '.':\n # The symbol is a dot.\n self.sample_buffer.extend(self.dot_sample_buffer)\n else:\n # The symbol is a dash.\n self.sample_buffer.extend(self.dash_sample_buffer)\n # After each dot or dash, add one dot-duration of silence.\n self.sample_buffer.extend(self.silence_1_sample_buffer)\n # After each character, add 2 more dot-durations of silence\n # resulting in three dot-durations of silence after a letter.\n self.sample_buffer.extend(self.silence_2_sample_buffer)\n silence_count = 3\n else:\n # The letter is not in the Morse code dictionary. If the\n # letter is a space character or tab character, then make\n # sure there are 7 dot-durations of silence to create the\n # proper separation between words.\n if c == ' ' or c == '\\t':\n silence_length = 7 - silence_count\n if silence_length > 3:\n self.sample_buffer.extend(self.silence_4_sample_buffer)\n silence_length -= 4\n if silence_length > 1:\n self.sample_buffer.extend(self.silence_2_sample_buffer)\n silence_length -= 2\n if silence_length > 0:\n self.sample_buffer.extend(self.silence_1_sample_buffer)\n silence_length -= 1\n silence_count = 0",
"def decode_morsecode(s):\n return ''.join(map(lambda l: MORSE_MAPPING.get(l, '?'), filter(None,\n reduce(lambda acc, n: acc + [' '] + n, [w.split(SHORT_PAUSE) for w in s.split(LONG_PAUSE)]))))",
"def Morse2English():\r\n \r\n count = 1\r\n excp = True\r\n excp2 = True\r\n excp3 = True\r\n global tree\r\n global root\r\n \r\n # creating the binary tree with the letters and symbols from the MorseTable file\r\n # also, if the variable tree is false, it means that the user have not used the Morse2English function yet, so,\r\n ## we need to create the binary tree, otherwise, if it is true, the user have already created the tree,\r\n ## so it is not necessary to create it again\r\n # But, if the use changed the morse symbols, it is necessary to clean the tree and build it again\r\n \r\n if newM:\r\n Morse_Table = open('new_MorseTable.txt','r')\r\n else:\r\n if not tree:\r\n try:\r\n Morse_Table = open('MorseTable.txt','r')\r\n except:\r\n shell_connect.write('MorseTable.txt not found. Download it to continue.\\n','COMMENT')\r\n excp3 = False\r\n if excp3:\r\n if not tree:\r\n # for each line (which has a letter and a morse code), insert the letter into the binary tree using the morse code as a path\r\n for line in Morse_Table:\r\n root.insert(line[2:-1],line[0])\r\n Morse_Table.close()\r\n tree = True\r\n #c_m2e = False\r\n ## take the name of the file (with morse code) to translate the code into english letters\r\n #try until the user enter a file that exists\r\n while True:\r\n try:\r\n filename = input('Please, write the name of the file (with extension) which has the morse code: \\n')\r\n hidden_morse_file = open(filename,'r')\r\n break\r\n except:\r\n shell_connect.write('File does not exist, try again.\\n','COMMENT')\r\n # creates the file which is going to have the message in english letters\r\n unhide_letter_file = open(filename[:-4]+'_unhidden.txt','w')\r\n # for each line of the file, take each morse code \"path\", for each morse code \"path\", go to the binary tree and\r\n # find which letter that morse code path represents\r\n # Invariant: line is always less than or equal to the number of lines in hidden_morse_file\r\n for line in hidden_morse_file:\r\n while line[0] == ' ':\r\n unhide_letter_file.write(' ')\r\n line = line[1:]\r\n morse_letter = line.split(' ')\r\n morse_letter[-1] = morse_letter[-1][:-1]\r\n if morse_letter != ['']:\r\n # if the user made a mistake and chose a file which is already in english letters, an error message is shown\r\n if morse_letter[0][0] == var1 or morse_letter[0][0] == var2 or morse_letter[0][0] == '*':\r\n # for each dot/dash in the file, go to the binary tree and find what the correspondent\r\n # letter is and write it in the new file\r\n # Invariant: hidden_letter_path is always less than or equal to the number of dashes and dots in morse_letter\r\n for hidden_letter_path in morse_letter:\r\n try:\r\n if hidden_letter_path != '':\r\n # if it has a '*' symbol in front, print the letter capitalized\r\n if hidden_letter_path [0] != '*':\r\n unhide_letter_file.write(root.find_letter(hidden_letter_path))\r\n else:\r\n new_letter = root.find_letter(hidden_letter_path[1:])\r\n unhide_letter_file.write(new_letter.upper())\r\n else:\r\n count += 1\r\n if count%2 == 0:\r\n # that's the end of a word, print one space\r\n unhide_letter_file.write(' ')\r\n else:\r\n pass\r\n except:\r\n # morse code not found\r\n shell_connect.write('Morse code not found.\\n','COMMENT')\r\n excp2 = False\r\n break\r\n excpwhy = 'Something is wrong with the morse code.'\r\n unhide_letter_file.write('\\n')\r\n excp = True\r\n else:\r\n excp = False\r\n shell_connect.write('\\nThis is not a morse code file!\\n','COMMENT')\r\n excpwhy = 'The file is not a morse code 
file.'\r\n break\r\n else:\r\n unhide_letter_file.write('\\n')\r\n hidden_morse_file.close()\r\n unhide_letter_file.close()\r\n if excp:\r\n if excp2:\r\n shell_connect.write('\\nMorse to English sucessful!','STRING')\r\n else:\r\n shell_connect.write('\\nMorse to English partially sucessful, some morse codes not found were ignored.','KEYWORD')\r\n print('\\nThe file {} was created.'.format(filename[:-4]+'_unhidden.txt'))\r\n else:\r\n shell_connect.write('\\nMorse to English failed. {}\\n'.format(excpwhy),'COMMENT')",
"def morseCodeTest():\r\n\r\n\thello = ['....','.','.-..','.-..','---']\r\n\tprint(morseDecode(hello))",
"def mcc():\n morse = {\"A\": \".-\",\n \"B\": \"-...\",\n \"C\": \"-.-.\",\n \"D\": \"-..\",\n \"E\": \".\",\n \"F\": \"..-.\",\n \"G\": \"--.\",\n \"H\": \"....\",\n \"I\": \"..\",\n \"J\": \".---\",\n \"K\": \"-.-\",\n \"L\": \".-..\",\n \"M\": \"--\",\n \"N\": \"-.\",\n \"O\": \"---\",\n \"P\": \".--.\",\n \"Q\": \"--.-\",\n \"R\": \".-.\",\n \"S\": \"...\",\n \"T\": \"-\",\n \"U\": \"..-\",\n \"V\": \"...-\",\n \"W\": \".--\",\n \"X\": \"-..-\",\n \"Y\": \"-.--\",\n \"Z\": \"--..\",\n \"0\": \"-----\",\n \"1\": \".----\",\n \"2\": \"..---\",\n \"3\": \"...--\",\n \"4\": \"....-\",\n \"5\": \".....\",\n \"6\": \"-....\",\n \"7\": '--...',\n \"8\": \"---..\",\n \"9\": \"----.\",\n \".\": \".-.-.-\",\n ',': \"--..--\"}\n\n print(morse[input('enter character to be converted').upper()])\n\n print(\n f'{morse[input(\"1:\").upper()]} '\n f'{morse[input(\"2:\").upper()]} '\n f'{morse[input(\"3:\").upper()]} '\n f'{morse[input(\"4:\").upper()]} '\n f'{morse[input(\"5:\").upper()]} '\n f'{morse[input(\"6:\").upper()]}')",
"def translator(filename: str, outfile):\r\n progname = filename[:-3]\r\n vm_code = parser(filename)\r\n for line in vm_code:\r\n out_line = trans_line(line, progname)\r\n outfile.write(out_line) # write out_line to file\r",
"def encode_file_using_codes(file_name, letter_codes):\r\n contents = \"\"\r\n with open(file_name) as f:\r\n contents = f.read()\r\n file_name_encoded = file_name + \"_encoded\"\r\n with open(file_name_encoded, 'w') as fout:\r\n for c in contents:\r\n fout.write(letter_codes[c])\r\n print(\"Wrote encoded text to {}\".format(file_name_encoded))",
"def decode_file_using_codes(file_name_encoded, letter_codes):\r\n contents = \"\"\r\n with open(file_name_encoded) as f:\r\n contents = f.read()\r\n file_name_encoded_decoded = file_name_encoded + \"_decoded\"\r\n codes_to_letters = {v: k for k, v in letter_codes.items()}\r\n with open(file_name_encoded_decoded, 'w') as fout:\r\n num_decoded_chars = 0\r\n partial_code = \"\"\r\n while num_decoded_chars < len(contents):\r\n partial_code += contents[num_decoded_chars]\r\n num_decoded_chars += 1\r\n letter = codes_to_letters.get(partial_code)\r\n if letter:\r\n fout.write(letter)\r\n partial_code = \"\"\r\n print(\"Wrote decoded text to {}\".format(file_name_encoded_decoded))",
"def file_preprocessing(input_file, output_file):\n # print(\"processing file \" + input_file)z\n # replace the punctuations with space\n replace_punctuation = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n # stemming\n stemmer = PorterStemmer()\n\n with open(input_file, 'r', encoding='utf-8', errors='replace') as inFile, open(output_file,'w') as outFile:\n for line in inFile:\n # replace punctuations\n # convert camel case into space separated\n # convert snake case into space separated\n # remove language keywords\n custom_stopwords = [\"ENDCOND\",\"PVSCL\", \"IFCOND\", \"EVAL\", \"ENDCOND\", \"ELSECOND\", \"ELSEIFCOND\", \"WINDOW\", \"FUNCTION\",\n \"CALLBACK\", \"ABWA\", \"ERROR\", \"TODO\", \"RESOLVE\", \"DOCUMENT\", \"CLASS\", \"LINE\", \"ELEMENT\", \"UTILS\",\n \"NEW\", \"IS\", \"EMPTY\",\"ANNOTATIONS\",\"ANNOTATION\",\"UTILS\",\"CURRENT\",\"TEXT\",\"GET\",\"NAME\",\"LISTERNER\",\n \"ADD\", \"EVENT\", \"CREATE\",\"FOR\", \"FIND\", \"LENGTH\", \"USER\", \"VALUE\", \"ALERT\", \"ALERTS\", \"ID\", \"HANDLER\",\n \"MESSAGE\", \"GROUP\", \"RETRIEVE\", \"MANAGER\", \"LANGUAGE\", \"CONTENT\", \"INIT\"]\n line_witout_puncs = ' '.join([snake_to_spaces(camel_to_spaces(word))\n for word in line.translate(replace_punctuation).split()\n if len(word) >=4 and word not in stopwords.words('english') #and #word.upper() not in (name.upper() for name in custom_stopwords)\n and word not in all_keywords])\n\n\n # stemming\n # singles = []\n # for plural in line_witout_puncs.split():\n # try:\n # singles.append(stemmer.stem(plural))\n # except UnicodeDecodeError:\n # print(plural)\n\n # line_stemmed = ' '.join(singles)\n # print(line_stemmed, file=outFile)\n print(line_witout_puncs.encode(\"utf-8\"), file=outFile)",
"def newMorseTable():\r\n excp3 = True\r\n global var1\r\n global var2\r\n global newM\r\n global dictionary\r\n global tree\r\n global root\r\n \r\n try:\r\n Morse_Table = open('MorseTable.txt','r')\r\n except:\r\n shell_connect.write('MorseTable.txt not found. Download it to continue.\\n','COMMENT')\r\n excp3 = False\r\n if excp3:\r\n var1 = '*'\r\n # The symbol cannot be '*' because that is the symbol that allow the computer recognizes capital letters\r\n # The symbol must be just one character\r\n while var1 == '*' or len(var1)>1:\r\n var1 = input(\"\\nPlease enter the first symbol in the morse code: \")\r\n if var1[0] == '*':\r\n shell_connect.write(\"\\nThe symbol must be different from '*'!\\n\",'COMMENT')\r\n elif len(var1)>1:\r\n shell_connect.write(\"\\nThe symbol must be ONE character!\\n\",'COMMENT')\r\n var2 = var1\r\n ## The symbols must be different from each other\r\n while var2 == var1 or var2[0] == '*' or len(var2)>1:\r\n var2 = input(\"\\nPlease enter the second symbol in the morse code: \")\r\n if var2 == var1:\r\n shell_connect.write('\\nThe second symbol must be different from the first one!\\n','COMMENT')\r\n elif var2 == '*':\r\n shell_connect.write(\"\\nThe symbol must be different from '*'!\\n\",'COMMENT')\r\n elif len(var2)>1:\r\n shell_connect.write(\"\\nThe symbol must be ONE character!\\n\",'COMMENT')\r\n else:\r\n pass\r\n # another Morse Table file is created using the new symbols\r\n new_Morse_Table = open('new_MorseTable.txt','w')\r\n # Invariant: line is always less than or equal to the number of lines in the Morse_Table file\r\n for line in Morse_Table:\r\n new_Morse_Table.write(line[:2])\r\n # Invariant: i is always a dash or a dot\r\n for i in line[2:-1]:\r\n if i == '.':\r\n new_Morse_Table.write(var1)\r\n elif i == '-':\r\n new_Morse_Table.write(var2)\r\n else:\r\n pass\r\n new_Morse_Table.write('\\n')\r\n Morse_Table.close()\r\n new_Morse_Table.close()\r\n # if the user changed the symbols, the variable newM is going to be True\r\n newM = True\r\n # If the user change the morse symbols, it is going to be necessary to change the dictionary\r\n #and the binary tree, so their boolean variables have to be set to false\r\n dictionary = False\r\n tree = False\r\n root = Node(' ')\r\n # you change the morse symbols, you have to go immediately to the E2M function\r\n # because the other morse code symbols wont work anymore\r\n # until you changed it to the same symbols like before\r\n print(\"\\n\\tEnglish to Morse function: \\n\")\r\n English2Morse()",
"def get_table_by_text(text: str) -> Dict[str, str]:\n for morse_table in MORSE_TABLES:\n letter_to_check = _get_first_letter_in_text(text)\n if letter_to_check.upper() in morse_table.keys():\n return morse_table\n if letter_to_check in morse_table.values():\n return MORSE_EN_CODE_DICT\n raise LanguageDoseNotSupported",
"def applyCoder(text, coder):\n res=''\n for ch in text:\n if ch in string.ascii_lowercase:\n res = res + coder[ch]\n elif ch in string.ascii_uppercase:\n res = res + coder[ch]\n else:\n res = res + ch\n return res",
"def code_mapper(file, idx):\n with open('./I94_SAS_Labels_Descriptions.SAS') as f:\n f_content = f.read()\n f_content = f_content.replace('\\t', '')\n f_content2 = f_content[f_content.index(idx):]\n f_content2 = f_content2[:f_content2.index(';')].split('\\n')\n f_content2 = [i.replace(\"'\", \"\") for i in f_content2]\n dic = [i.split('=') for i in f_content2[1:]]\n dic = dict([i[0].strip(), i[1].strip()] for i in dic if len(i) == 2)\n return dic",
"def encode(audio, video, output):\n check_call([\"mencoder\", \"-audiofile\", audio, \"-oac\", \"lavc\", \"-ovc\",\n \"lavc\", video, \"-o\", output], stdin=PIPE, stdout=PIPE, stderr=STDOUT)",
"def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)",
"def semcor2R(args):\r\n input_files = list_files(*args.input_files)\r\n output_file = Path(args.output_file)\r\n senses = args.sense\r\n multiword = senses or args.multiword\r\n if senses and output_file == output_default / 'semcor2r.csv':\r\n output_file = output_default / 'semcor2r_semtagged.csv'\r\n with output_file.open('w') as file:\r\n file.write(\"\\t\".join([\"concordance\", \"file\", \"token_id\", \"wordform\", \"PoS\", \"lemma\"]))\r\n if senses:\r\n file.write('\\twnsn\\tsense_key')\r\n file.write('\\n')\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n index = 0\r\n if word.name == 'punc':\r\n index += 1\r\n continue\r\n if not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]) + '\\n')\r\n index += 1\r\n else:\r\n token = Token.from_tag(word)\r\n if senses and not token.has_senses:\r\n continue\r\n token_id = '/'.join([corpus_file.shortname, token.wordform, str(index)])\r\n if args.verbose and type(token.status)==tuple:\r\n report_token_status(token, token_id)\r\n file.write('\\t'.join([corpus_file.concordance, corpus_file.shortname, token_id, token.wordform, token.pos, token.lemma]))\r\n index += 1\r\n if senses:\r\n file.write('\\t{}\\t{}'.format(token.wnsn, token.sense_key))\r\n file.write('\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))",
"def semcor2run(args):\r\n input_files = list_files(*args.input_files)\r\n output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'running_text'\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for paragraph in corpus_file.text.find_all('p'):\r\n for word in paragraph.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write(word.string)\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n else:\r\n token = Token.from_tag(word)\r\n output_file.write(' {}/{}'.format(token.wordform, token.pos))\r\n output_file.write('\\n')",
"def decrypt(self, input_file, output_file):\n self.key %= 26\n plaintext = \"\"\n with open(input_file) as encrypted_text:\n self.text = encrypted_text.read()\n for char in self.text:\n if char.isalpha():\n if 65 <= ord(char) <= 90: #char is between A and Z\n if ord(char) - self.key >= 65: #65 = ord('A')\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 65:\n plaintext += chr(ord(char) - self.key + 26)\n if 97 <= ord(char) <= 122:\n if ord(char) - self.key >= 97:\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 97:\n plaintext += chr(ord(char) - self.key + 26)\n else:\n plaintext += char\n\n decrypted_file = open(output_file, 'w')\n decrypted_file.write(plaintext)\n print \"Created file: ces-decrypted.txt\"",
"def main():\r\n filename = sys.argv[1]\r\n codes = huffman_letter_codes_from_file_contents(filename)\r\n print(codes)\r\n encode_file_using_codes(filename, codes)\r\n decode_file_using_codes(filename + \"_encoded\", codes)",
"def main():\n\n args = get_args()\n input = args.input\n output = args.outfile\n howler = \"\"\n \n try:\n with open(input) as file:\n howler = file.read().rstrip().upper()\n \n if output:\n with open(output, 'w') as file:\n file.write(howler)\n else:\n print(howler)\n \n except IOError:\n howler = input.upper()\n print(howler)",
"def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin",
"def _transform(self, original, code):\n\n msg = list(original)\n for k in range(len(msg)):\n\n if msg[k].isupper():\n j = ord(msg[k]) - ord(\"A\") # Determining correct index for new character.\n msg[k] = code[j]\n\n return \"\".join(msg)"
]
| [
"0.66860163",
"0.6397452",
"0.6393827",
"0.63401276",
"0.62655395",
"0.62346786",
"0.61487025",
"0.60022104",
"0.5958334",
"0.59045285",
"0.5865525",
"0.5748146",
"0.56561875",
"0.56045705",
"0.54538393",
"0.5409502",
"0.53338265",
"0.5236571",
"0.523552",
"0.52099687",
"0.5173674",
"0.5103941",
"0.5099868",
"0.5088851",
"0.50230914",
"0.50227284",
"0.4986342",
"0.4980368",
"0.49792537",
"0.49774864"
]
| 0.8085192 | 0 |
Determine the appropriate format string for the pie chart percentage label | def pie_pct_format(value):
return '' if value < 7 else '{}'.format(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% in \" + self.name\n return text",
"def represent_percent(self, dlpkgs, numpkgs, length):\n if dlpkgs == 0:\n return '{0:^{1}s}'.format('-', length)\n else:\n return '{0:^{1}s}'.format(self.pct(dlpkgs, numpkgs), length)",
"def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% on line \" + self.line\n return text",
"def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% in \" + self.name\n text += \" [\" + self.file_name + \"]\"\n return text",
"def format(self, value):\r\n metric = {\r\n \"degree\": u'\\N{DEGREE SIGN}',\r\n \"percent\": u'%',\r\n \"meter\": u'm',\r\n \"klux\": u' L',\r\n \"none\": ''\r\n }[self.unit]\r\n if self.unit == \"percent\":\r\n value *= 100.0\r\n return u\"{:3.1f}{}\".format(value, metric)",
"def format_perc_table(df_perc_diff, label_projs):\n df_perc = df_perc_diff.pivot(index='project', columns='thresh').round(2)['perc_above']\n df_perc.rename(index=label_projs, inplace=True)\n df_perc = df_perc.applymap(lambda x : '{:.2f}%'.format(x))\n return df_perc",
"def format_percentage(num):\n return \"{}%\".format(num)",
"def calc_percent(byte_counter, data_len):\n if data_len is None or not data_len:\n # case where length is not present in metadata or zero\n return '---.-%'\n return '%6s' % ('%3.1f%%'\n % (float(byte_counter) / float(data_len) * 100.0))",
"def __str__(self):\n return \"{:.2f}%\".format(self.load())",
"def readable_percent(value, d):\n return \"%s %%\" % (str(round(100.0*float(value), int(d))))",
"def _get_label(self, division, elapsed):\n\t\tsecs = int(elapsed) % 60\n\n\t\tmins = int(elapsed) / 60\n\t\thrs = mins / 60\n\t\tdays = hrs / 24\n\t\tweeks = days / 7\n\n\t\tif division >= 7 * 24 * 60 * 60: # >1wk divisions: show weeks\n\t\t\treturn '%dw' % weeks\n\t\telif division >= 24 * 60 * 60: # >24h divisions: show days\n\t\t\treturn '%dd' % days\n\t\telif division >= 60 * 60: # >1h divisions: show hours\n\t\t\treturn '%dh' % hrs\n\t\telif division >= 5 * 60: # >5m divisions: show minutes\n\t\t\treturn '%dm' % mins\n\t\telif division >= 1: # >1s divisions: show minutes:seconds\n\t\t\treturn '%dm%02ds' % (mins, secs)\n\t\telif division >= 0.1: # >0.1s divisions: show seconds.0\n\t\t\treturn '%d.%ss' % (secs, str(int(10.0 * (elapsed - int(elapsed)))))\n\t\telif division >= 0.01: # >0.1s divisions: show seconds.0\n\t\t\treturn '%d.%02ds' % (secs, int(100.0 * (elapsed - int(elapsed))))\n\t\telse: # show seconds.00\n\t\t\treturn '%d.%03ds' % (secs, int(1000.0 * (elapsed - int(elapsed))))",
"def as_percentages(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E/I: ' + str(self.e_pct) + '%/' + str(self.i_pct) + '%; '\n score_str += 'N/S: ' + str(self.n_pct) + '%/' + str(self.s_pct) + '%; '\n score_str += 'F/T: ' + str(self.f_pct) + '%/' + str(self.t_pct) + '%; '\n score_str += 'J/P: ' + str(self.j_pct) + '%/' + str(self.p_pct) + '%'\n return score_str",
"def count_string(counts_series, num_patients):\n output = \"\"\n for label in counts_series.keys():\n output += label + \" = %.0f\" % counts_series[label]\n percent = (counts_series[label] / num_patients) * 100\n output += ' (%.1f%%)\\n' % percent\n return output[:-1] # take off the final \\n",
"def func(pct, allvals):\n return str(format(round(pct/100.*np.sum(allvals), 2),\".2f\")) + \"€\"",
"def percent(value):\n return f\"{value:,.2f} %\"",
"def percent_formats(self) -> localedata.LocaleDataDict:\n return self._data['percent_formats']",
"def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))",
"def __str__(self):\n width = self.width\n if self.length == 0:\n percent = 1\n else:\n percent = max(self.value, 0) / self.length\n pg_char = self.pg_char\n ending = ' ' + (self.str_time_remaining()\n if self.timer else '{0} of {1} complete'.format(\n self.value, self.length))\n if width - len(ending) < 10 or self.has_output:\n self.width = 0\n if self.timer:\n return \"{0:.0%} complete: {1}\".format(\n percent, self.str_time_remaining())\n return \"{0:.0%} complete\".format(percent)\n num_of_chars = int(percent * self.width)\n pbar = '[' + pg_char*num_of_chars + \\\n ' '*(self.width-num_of_chars) + ']' + ending\n\n str_percent = ' {0:.0%} '.format(percent)\n\n return pbar[:self.width//2 - 2] \\\n + str_percent + pbar[self.width//2+len(str_percent) - 2:]",
"def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)",
"def format_percentage_json(num):\n return {\"Units\": \"%\", \"Value\": num}",
"def percent_str(part, total):\n return str(round(100 * float(part) / float(total), 2)) + '%'",
"def percentageColor(self):\n tup = None\n if self.percent >= 0.7:\n tup = Livebar.colors['green']\n elif self.percent >= 0.3 and self.percent < 0.7:\n tup = Livebar.colors['orange']\n elif self.percent >= 0.0 and self.percent < 0.3:\n tup = Livebar.colors['red']\n return tup",
"def value_to_percent(value):\n return ...",
"def unit_of_measurement(self):\n return \"%\"",
"def unit_of_measurement(self) -> str:\n return \"%\"",
"def unit_of_measurement(self) -> str:\n return \"%\"",
"def convert_percent_str(x):\n if x:\n return float(str(x).strip(\"% \"))\n return 0",
"def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str",
"def get_percentile_label(percentile: Union[float, int, str]) -> str:\n if isinstance(percentile, str):\n percentile = float(percentile)\n if percentile == round(percentile):\n percentile = round(percentile)\n if isinstance(percentile, float):\n percentile_str = f'{percentile:.2f}'\n # Add `...` to the label if the percentile value changed after rounding\n if float(percentile_str) != percentile:\n percentile_str += '...'\n percentile = percentile_str\n return f'{PERCENTILE} {percentile}'",
"def test_get_dup_labels_perc_all_valid(self):\r\n\r\n # No duplicates\r\n\r\n labels = ['seq1', 'seq2', 'seq3', 'seq4']\r\n\r\n actual_perc, dups = get_dup_labels_perc(labels)\r\n\r\n expected_perc = \"%1.3f\" % 0.0\r\n\r\n self.assertEqual(actual_perc, expected_perc)\r\n\r\n expected_dups = []\r\n\r\n self.assertEqual(dups, expected_dups)"
]
| [
"0.71216166",
"0.69674236",
"0.69005656",
"0.67435724",
"0.6666876",
"0.66204953",
"0.6384511",
"0.6376195",
"0.6370158",
"0.6365492",
"0.63114715",
"0.6275148",
"0.62578493",
"0.62169856",
"0.616937",
"0.61532456",
"0.6144259",
"0.6139536",
"0.6124622",
"0.6106336",
"0.6047762",
"0.6036133",
"0.60337895",
"0.5961062",
"0.59597194",
"0.59597194",
"0.59451115",
"0.59422517",
"0.59331363",
"0.5924785"
]
| 0.78149563 | 0 |
Create a valid authentication header either from username/password or a token if any were provided; return an empty dict otherwise | def create_auth_header(username=None, password=None, token=None, tenant=None):
headers = {}
if username and password:
credentials = b64encode(
'{0}:{1}'.format(username, password).encode('utf-8')
).decode('ascii')
headers = {
'Authorization':
'Basic ' + credentials
}
elif token:
headers = {'Authentication-Token': token}
if tenant:
headers['Tenant'] = tenant
return headers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}",
"def make_auth_headers(email: str, password: str) -> Dict:\n auth_string = f\"{email}:{password}\"\n auth_binary = base64.b64encode(auth_string.encode())\n return {\"Authorization\": \"Basic \" + auth_binary.decode()}",
"def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}",
"def get_auth_header(self) -> Mapping[str, Any]:\n return {}",
"def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}",
"def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}",
"def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}",
"def parse_auth(header):\r\n try:\r\n method, data = header.split(None, 1)\r\n if method.lower() == 'basic':\r\n #TODO: Add 2to3 save base64[encode/decode] functions.\r\n user, pwd = touni(base64.b64decode(tob(data))).split(':',1)\r\n return user, pwd\r\n except (KeyError, ValueError):\r\n return None",
"def get_auth_headers(self) -> Dict:\n if self.__access_token:\n return {\n 'Authorization': self.__access_token,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n elif self.__license_key and self.__email and self.__password:\n return {\n 'Authorization': self.__calculate_basic_auth_value(),\n 'License-Key': self.__license_key,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n else:\n raise ValueError('Credentials are not configured')",
"def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}",
"def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header",
"def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }",
"def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token",
"def token_header(token):\n message = '{token}:ignored'.format(token=token)\n return {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}",
"def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers",
"def parse_basic_auth_header(self, request: Request) -> Tuple[str, str]:\n token = self.get_auth_token(request, \"Basic\")\n try:\n username, password = base64.b64decode(token.encode('ascii')).decode('utf-8').split(':')\n except Exception:\n raise TokenInvalidException\n return username, password",
"def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}",
"def get_auth_headers():\n\n auth_type = \"Basic\"\n if request.headers.get('UseXBasic'):\n auth_type = \"XBasic\"\n\n return {\n 'WWW-Authenticate': '%s realm=\"Login Required\"' % auth_type\n }",
"def buildHeader(self):\n if self.key:\n userString = self.user+b\":\"+self.key\n else:\n userString = self.user+b\":\"\n \n encodedUserString = b64encode(userString)\n decodedUserString = encodedUserString.decode(\"ascii\")\n self.basicAuthHeader = {\"Authorization\": \"Basic \" + decodedUserString}",
"def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token",
"def authTest(token=None):\n if not token:\n token = bottle.request.get_header('X-Auth-Token')\n\n data = bottle.request.json\n if not token:\n user = data.get('user')\n password = data.get('password')\n\n query = odict(bottle.request.query.items())\n if not user or not password:\n user = query.get('user')\n password = query.get('password')\n\n if not token and (not user or not password):\n bottle.abort(400, \"Authentication credentials missing.\")\n\n result = odict(token=token,\n user=user,\n password=password,\n headers=odict(bottle.request.headers.items()),\n query=query,\n data=data,\n )\n return result",
"def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header",
"def get_token_auth_header(params):\n auth = get_token(params)\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must start with Bearer\"}, 401)\n\n if len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Token not found\"}, 401)\n\n if len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must be Bearer token\"}, 401)\n\n token = parts[1]\n return token",
"def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token",
"def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers",
"def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token",
"def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}",
"def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token",
"def create_authorization_header(username, password, creation=None, nonce=None):\n \n digest, creation, nonce = create_password_digest(password, creation, nonce)\n \n header = 'UsernameToken Username=\"%s\", PasswordDigest=\"%s\", Created=\"%s\", Nonce=\"%s\"' % (\n username, digest, creation, nonce\n )\n \n return (header, creation, nonce)",
"def authenticate(self, request):\n auth = get_authorization_header(request).split()\n\n if not auth or auth[0].lower() != b\"basic\":\n return None\n\n if len(auth) == 1:\n raise AuthenticationFailed(\n \"Invalid Basic authorization header. No credentials provided.\"\n )\n elif len(auth) > 2:\n raise AuthenticationFailed(\n \"Invalid Basic authorization header. Credentials string should not contain spaces.\"\n )\n\n try:\n auth_parts = (\n base64.b64decode(auth[1]).decode(HTTP_HEADER_ENCODING).split(\":\")\n )\n except (TypeError, UnicodeDecodeError, binascii.Error):\n raise AuthenticationFailed(\n \"Invalid Basic authorization header. Credentials not correctly base64 encoded.\"\n )\n\n username, password = (\n auth_parts if len(auth_parts) >= 2 else (auth_parts[0], None)\n )\n if password:\n if settings.API_BASIC_AUTH:\n return DRFBasicAuthentication().authenticate_credentials(\n username, password, request\n )\n else:\n raise AuthenticationFailed(\n \"Basic authorization with a password is not allowed; use an API token instead.\"\n )\n else:\n # Treat the username as a token; pass it on to `knox.TokenAuthentication`\n token = username.encode(\"utf-8\")\n return TokenAuthentication().authenticate_credentials(token)"
]
| [
"0.72454095",
"0.7221148",
"0.71215636",
"0.7103372",
"0.7097047",
"0.6983618",
"0.6947449",
"0.69248074",
"0.69133794",
"0.687671",
"0.6841492",
"0.683843",
"0.6820057",
"0.6804868",
"0.67723274",
"0.6760751",
"0.667692",
"0.66578674",
"0.6633724",
"0.6623119",
"0.6621205",
"0.6609578",
"0.6601041",
"0.65804183",
"0.65694696",
"0.6564613",
"0.6560475",
"0.65529925",
"0.6526494",
"0.65264267"
]
| 0.77106714 | 0 |
Sets the setting of this Software. | def setting(self, setting):
self._setting = setting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_setting(self, name, value):\n w = self.choices['which']\n if w == 'global_default':\n return self.settings.set_global_default(name, value)\n elif w == 'project_default':\n return self.settings.set_project_default(name, value)\n elif w == 'global_variant':\n return self.settings.set_global_variant(self.choices['variant'],\n name, value)\n elif w == 'project_variant':\n return self.settings.set_project_variant(self.choices['variant'],\n name, value)\n elif w == 'project_package_default':\n return self.settings.set_project_package_default(\n self.choices['package'], name, value)\n elif w == 'project_package_variant':\n return self.settings.set_project_package_variant(\n self.choices['package'], self.choices['variant'], name, value)\n elif w == 'project_package_target':\n return self.settings.set_project_package_target(\n self.choices['package'], self.choices['target'], name, value)\n else:\n raise AssertionError(w)",
"def settings(self, value):\n self._settings = value",
"def do_set(self, setting: str):\n if self._real_module is None:\n print(\"Set command depends on using a module. See 'use' for help.\")\n return\n\n splitted_input = setting.split()\n if len(splitted_input) < 2:\n print(\"Invalid argument to split\")\n else:\n key = splitted_input[0]\n value = \" \".join(splitted_input[1:])\n self._real_module.set(key, value)",
"def set_setting(self, setting, value):\n return self.do_rpc(\"set_setting\", key=key, value=value)",
"def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]",
"def SetSettings (self, settings) :\n\t\treturn self.run(\"SetSettings\", settings)",
"def integration_setting(self, integration_setting):\n\n self._integration_setting = integration_setting",
"def __setattr__(self, name, value):\n # Can't set namespace variables\n if name.startswith('_'):\n raise ValueError('Settings cannot start with an underscore')\n\n if name in self._settings:\n # Set an existing setting's value\n if isinstance(value, Setting):\n raise ValueError('Settings cannot be redefined')\n self._settings[name].set(value)\n else:\n # Create a new setting\n if not isinstance(value, Setting):\n raise ValueError(\n 'Settings must be defined before they can be assigned',\n )\n self._settings[name] = value",
"def set(self, key, value):\n key_str = self.optionxform(key)\n value_str = to_unicode(value)\n self._cache.pop(key_str, None)\n option_key = {\n 'product': self.product,\n 'section': self.name,\n 'option': key_str,\n }\n try:\n setting = ProductSetting(self.env, option_key)\n except ResourceNotFound:\n if value is not None:\n # Insert new record in the database\n setting = ProductSetting(self.env)\n setting._data.update(option_key)\n setting._data['value'] = value_str\n self.env.log.debug('Writing option %s', setting._data)\n setting.insert()\n else:\n if value is None:\n # Delete existing record from the database\n # FIXME : Why bother with setting overriden\n self.overridden[key] = True\n setting.delete()\n else:\n # Update existing record\n setting._data['value'] = value\n setting.update()",
"def settings(self, settings):\n\n self._settings = settings",
"def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def __setattr__(self, name: str, value: Any) -> None:\n if name.isupper():\n self._settings[name] = value\n super().__setattr__(name, value)",
"def set_flag(self, set_flag):\n\n self._set_flag = set_flag",
"def setValue(self, valueName, valueSetting):\n\t\tself.settings[valueName][0] = valueSetting",
"def __setitem__(self, key, value):\n self.settings.set(key, value)",
"async def __set(ctx: commands.Context, setting: str, value: str):\n settings = ctx.bot.app_settings\n valid_settings = settings.USER_FACING_SETTINGS\n found = [key for key in valid_settings if key.startswith(setting)]\n if len(found) == 1:\n setting = found[0]\n else:\n await ctx.send(f'Invalid setting \"{setting}\". Valid choices are:'\n f' [{\", \".join(valid_settings)}]')\n return\n\n valid_values = settings.get_valid_values(setting)\n if not settings.set(setting, value, valid_values):\n if valid_values:\n await ctx.send(f'invalid value, use [{\", \".join(valid_values)}]')\n return\n\n # Reload library when needed\n if setting in ['language', 'system', 'mode']:\n ctx.bot.reload_library()\n\n # Reload cogs when needed\n if setting in ['system', 'mode']:\n try:\n logging.info('%s triggered a cogs reload.', ctx.author)\n await ctx.send(f'{ctx.message.author.mention} triggered a mode change.')\n ctx.bot.reload_cogs()\n except (commands.ExtensionNotLoaded,\n commands.ExtensionNotFound,\n commands.NoEntryPointError,\n commands.ExtensionFailed):\n # Inform User that reload was not successful\n message_error = 'Error on reloading cogs.'\n logging.error(message_error)\n await ctx.send(message_error)\n return\n\n message_success = f'{setting} changed to \"{value}\".'\n logging.info(message_success)\n await ctx.send(message_success)\n return",
"def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)",
"def set(name):\n set_config(name)",
"def setSettings(self):\r\n # 根据默认参数设置,根据是否使用config来设定参数\r\n if self.__config__[\"config\"] is False:\r\n self.json.setChecked(False)\r\n self.json_path.setEnabled(False)\r\n self.json_select.setEnabled(False)\r\n\r\n tem = [self.l_line, self.r_line, self.p_line]\r\n [x.setEnabled(True) for x in tem]\r\n\r\n for key, value in self.elements.items():\r\n key.setEnabled(True)\r\n\r\n # 设定程序或者json文件的路径\r\n if self.__config__[\"exe\"]:\r\n self.executable.setText(self.__config__[\"exe\"])\r\n else:\r\n self.executable.clear()\r\n if self.__config__[\"config_path\"]:\r\n self.json_path.setText(self.__config__[\"config_path\"])\r\n else:\r\n self.json_path.clear()\r\n \r\n # 设定其他参数\r\n if self.__config__[\"paras\"]:\r\n for key, value in self.__config__[\"paras\"].items():\r\n element = self.parameters[key]\r\n if value not in (\"::\", \"\"):\r\n element.setEnabled(True)\r\n\r\n key1 = get_key_by_value(self.elements, element)\r\n if key1:\r\n key1.setEnabled(True)\r\n key1.setChecked(True)\r\n\r\n if isinstance(element, QLineEdit):\r\n element.setText(value)\r\n elif isinstance(element, QComboBox):\r\n index = element.findText(value, Qt.MatchFixedString)\r\n if index >= 0:\r\n element.setCurrentIndex(index)",
"def units_setting(self, units_setting):\n\n self._units_setting = units_setting",
"def setSetting(self,settingId, value):\r\n settingXmlFile = xbmc.translatePath('special://profile/addon_data/' + self.addonId + '/settings.xml')\r\n tree = ET.parse(settingXmlFile)\r\n root = tree.getroot()\r\n srchStr = './/setting[@id=\"' + settingId + '\"]'\r\n root.find(srchStr).set('value', str(value))\r\n tree.write(settingXmlFile, method = 'xml')\r\n pass",
"def change_setting(self, key, val):\n if isinstance(val, bool):\n payload = 'on' if val else 'off'\n else:\n payload = val\n return self._request('post',\n 'fifo_command.php?cmd={}%20{}'.format(key,\n payload))",
"def set_setting(key, value):\n with sublime_haskell_settings as settings:\n settings[key] = value\n get_settings().set(key, value)\n save_settings()",
"def set_param(self, param_value):\n with open(\"settings.txt\", \"r\") as f:\n filedata = f.read()\n settings = [_.split(\"=\") for _ in filedata.split(\"\\n\")]\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n if setting[0] == self.param:\n\n setting[1] = param_value\n\n with open(\"settings.txt\", \"w\") as f:\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n f.write(setting[0] + \"=\" + setting[1] + \"\\n\")",
"def set_param(self):\n with open(\"settings.txt\", \"r\") as f:\n filedata = f.read()\n settings = [_.split(\"=\") for _ in filedata.split(\"\\n\")]\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n if setting[0] == self.param:\n setting[1] = str(self.param_value)\n\n with open(\"settings.txt\", \"w\") as f:\n for setting in settings:\n if len(setting) < 2: # if blank line\n continue\n f.write(setting[0] + \"=\" + setting[1] + \"\\n\")",
"def set(self, value: str):\n self.openshift.do_action(\"set\", [\"env\", self.environ.resource_type, self.deployment, f\"{self.name}={value}\"])\n # pylint: disable=protected-access\n self.environ.wait_for_resource(self.deployment)",
"def set_pref(self, name, value):\r\n pass",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def set_hardware_control(self, value):\n self.widgets['hardware_control'].setChecked(value)\n self._under_hardware_control = value"
]
| [
"0.6803539",
"0.6797325",
"0.6656029",
"0.65470356",
"0.6305723",
"0.62939876",
"0.6245357",
"0.6234438",
"0.6230997",
"0.62296474",
"0.6160666",
"0.61122584",
"0.60903925",
"0.60892445",
"0.6085018",
"0.6071015",
"0.60507715",
"0.6010427",
"0.5959746",
"0.5945527",
"0.5885073",
"0.5868644",
"0.58473307",
"0.5834229",
"0.5832652",
"0.5827985",
"0.5808228",
"0.580046",
"0.5754249",
"0.5749256"
]
| 0.7087973 | 0 |
y_hat is the output tensor from the network; y is the label tensor (no embedding); returns the mask to use for negating the padding | def label_mask(y, y_hat):
mask = torch.ones(len(y), np.shape(y)[1])
for i in range(len(y[0])):
try:
y_hat_index = np.where(y_hat[:,i]==1)[0][0]
y_index = np.where(y[:,i]==1)[0][0]
index = max(y_hat_index, y_index)
mask[index:, i] = 0
except:
pass
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_loss(self, y, y_hat):\r\n return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=y_hat))",
"def softmax_cross_entropy_loss(self, y, y_hat):\n batch_size = y.shape[0]\n return -(y - y_hat) / batch_size",
"def backward(cls, y, y_hat):\n sum_of_outputs = np.sum(y_hat, axis=1)\n correct_outputs = y_hat[range(len(y)), y]\n\n d_layer = y_hat\n d_layer[range(len(y)), y] = (correct_outputs - sum_of_outputs) / sum_of_outputs\n d_layer /= len(y)\n return d_layer",
"def softmax_cross_entropy(y, y_hat):\n loss = cross_entropy(y, softmax(y_hat))\n\n filter_ = ~tf.math.is_finite(loss)\n replace_ = tf.zeros_like(loss)\n\n return tf.where(filter_, replace_, loss)",
"def mask_nan(y_true, y_pred):\n notnan_true = K.cast(~tf.math.is_nan(y_true), \"float32\")\n num_notnan = K.sum(K.flatten(notnan_true))\n y_pred = tf.math.multiply(y_pred, notnan_true)\n\n # We need to use tf.where to do this substitution, because when trying to\n # multiply with just the notnan_true masks,\n # NaN*0 = NaN, so NaNs are not removed\n y_true = K.cast(\n tf.where(~tf.math.is_nan(y_true), y_true, tf.zeros_like(y_true)), \"float32\"\n )\n return y_pred, y_true, num_notnan",
"def mask_sensitivy(Y, Y_pred): \n cm = confusion_matrix(Y.reshape(-1), Y_pred.reshape(-1))\n return sensitivity(cm)",
"def attention_bias_ignore_padding(memory_padding):\n\tret = tf.multiply(memory_padding, -1e18)\n\treturn tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)",
"def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l",
"def loss_hole(self, mask, y_true, y_pred):\n return self.l1((1-mask) * y_true, (1-mask) * y_pred)",
"def decoding_error_rate(y, y_hat):\n return tf.reduce_mean(tf.math.abs(y - binarize(y_hat)), axis=[1])",
"def loss_func(y, y_hat):\n # TODO: implement the function. \n # Consider these functions: `tf.square`, `tf.reduce_sum`\n\n loss = tf.reduce_sum(tf.square(y_hat - y))\n\n return loss",
"def false_neg(yt, yp) -> Any:\n from keras import backend as K\n return K.sum(K.cast((1 - yp) * (0 + yt) > 0.5, 'float')) / K.maximum(1.0, K.sum(0 + yt))",
"def smooth_negative_labels(y):\n return y + (np.random.random(y.shape) * 0.1)",
"def binary_cross_entropy(Y, Y_hat, epsilon=1e-8):\n \n m = Y.shape[0]\n \n # make data safe\n Y_hat = np.clip(Y_hat, a_min=epsilon, a_max=(1 - epsilon))\n \n # calc cost\n cost = (1 / m) * np.nansum(-np.log(Y_hat) * Y - np.log(1 - Y_hat) * (1 - Y))\n cost = np.squeeze(cost)\n \n # calc gradient\n dY_hat = -(Y / Y_hat) + (1 - Y) / (1 - Y_hat)\n \n return cost, dY_hat",
"def mask_nan_keep_loss(y_true, y_pred):\n y_pred, y_true, num_notnan = mask_nan(y_true, y_pred)\n loss = K.sum((K.flatten(y_pred) - K.flatten(y_true)) ** 2) / num_notnan\n return tf.where(~tf.math.is_nan(loss), loss, 0)",
"def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss",
"def get_positive_mask(labels):\n batch_shape = tf.shape(labels)[0]\n mask_1 = tf.logical_not(get_negative_mask(labels))\n mask_2 = tf.logical_not(tf.eye(batch_shape, dtype=tf.bool))\n return tf.logical_and(mask_1, mask_2)",
"def forward(self, y, mask_y, h):\n y = y.transpose(1, 0) # batch x T x dim\n\n mask_y = mask_y.transpose(1, 0) # batch x T\n Wy = torch.bmm(y, self.W_y.unsqueeze(0).expand(y.size(0), *self.W_y.size())) # batch x T x dim\n Wh = torch.mm(h, self.W_h) # batch x dim\n\n M = torch.tanh(Wy + Wh.unsqueeze(1).expand(Wh.size(0), y.size(1), Wh.size(1))) # batch x T x dim\n alpha = torch.bmm(M, self.W_alpha.unsqueeze(0).expand(y.size(0), *self.W_alpha.size())).squeeze(-1) # batch x T\n\n alpha = alpha + (-1000.0 * (1. - mask_y)) # To ensure probability mass doesn't fall on non tokens\n alpha = F.softmax(alpha, dim=1)\n r = torch.bmm(alpha.unsqueeze(1), y).squeeze(1) # batch x dim\n\n h_star = self.combine_last(r, h)\n\n return h_star, alpha",
"def weighted_bce(y_hat, target, labels, weight_multiplier):\n weight = labels.clone()\n weight.requires_grad = False\n weight[weight < 1] = 2\n weight[weight == 3] = 4\n weight = 2 / weight\n\n # apply mask\n weight = weight * weight_multiplier\n return F.binary_cross_entropy(y_hat, target, weight)",
"def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )",
"def forward(cls, y, y_hat):\n return - np.sum(y * np.ma.log(y_hat).filled(0))",
"def loss(y, y_hat):\n residual = 0\n for i,yy in enumerate(y_hat):\n residual += (y[i] - yy)**2\n\n return residual",
"def TripletSemiHardLoss(y_true, y_pred, device, margin=1.0):\n\n labels, embeddings = y_true, y_pred\n\n # Reshape label tensor to [batch_size, 1].\n lshape = labels.shape\n labels = torch.reshape(labels, [lshape[0], 1])\n\n pdist_matrix = pairwise_distance_torch(embeddings, device)\n\n # Build pairwise binary adjacency matrix.\n adjacency = torch.eq(labels, labels.transpose(0, 1))\n # Invert so we can select negatives only.\n adjacency_not = adjacency.logical_not()\n\n batch_size = labels.shape[0]\n\n # Compute the mask.\n pdist_matrix_tile = pdist_matrix.repeat(batch_size, 1)\n adjacency_not_tile = adjacency_not.repeat(batch_size, 1)\n\n transpose_reshape = pdist_matrix.transpose(0, 1).reshape(-1, 1)\n greater = pdist_matrix_tile > transpose_reshape\n\n mask = adjacency_not_tile & greater\n\n # final mask\n mask_step = mask.to(dtype=torch.float32)\n mask_step = mask_step.sum(axis=1)\n mask_step = mask_step > 0.0\n mask_final = mask_step.reshape(batch_size, batch_size)\n mask_final = mask_final.transpose(0, 1)\n\n adjacency_not = adjacency_not.to(dtype=torch.float32)\n mask = mask.to(dtype=torch.float32)\n\n # negatives_outside: smallest D_an where D_an > D_ap.\n axis_maximums = torch.max(pdist_matrix_tile, dim=1, keepdim=True)\n masked_minimums = (\n torch.min(\n torch.mul(\n pdist_matrix_tile - axis_maximums[0],\n mask,\n ),\n dim=1,\n keepdim=True,\n )[0]\n + axis_maximums[0]\n )\n negatives_outside = masked_minimums.reshape([batch_size, batch_size])\n negatives_outside = negatives_outside.transpose(0, 1)\n\n # negatives_inside: largest D_an.\n axis_minimums = torch.min(pdist_matrix, dim=1, keepdim=True)\n masked_maximums = (\n torch.max(\n torch.mul(\n pdist_matrix - axis_minimums[0],\n adjacency_not,\n ),\n dim=1,\n keepdim=True,\n )[0]\n + axis_minimums[0]\n )\n negatives_inside = masked_maximums.repeat(1, batch_size)\n\n semi_hard_negatives = torch.where(\n mask_final,\n negatives_outside,\n negatives_inside,\n )\n\n loss_mat = margin + pdist_matrix - semi_hard_negatives\n\n mask_positives = adjacency.to(dtype=torch.float32) - torch.diag(\n torch.ones(batch_size)\n ).to(device)\n num_positives = mask_positives.sum()\n\n triplet_loss = (\n torch.max(\n torch.mul(loss_mat, mask_positives),\n torch.tensor([0.0]).to(device),\n )\n ).sum() / num_positives\n triplet_loss = triplet_loss.to(dtype=embeddings.dtype)\n return triplet_loss",
"def reduce_y(y, mask):\n return y",
"def get_y_true(self):\n return self.label[:self.steps * self.batch_size, :]",
"def cross_entropy_loss(self, y, y_hat):\n if y.ndim == 1:\n batch_size = 1\n else:\n batch_size = y.shape[0]\n delta = 1e-7\n return -np.sum(y * np.log(y_hat + delta)) / batch_size",
"def pwcnet_loss(y, y_hat_pyr, opts):\n # Use a different norm based on the training mode we're in (training vs fine-tuning)\n norm_order = 2 if opts['loss_fn'] == 'loss_multiscale' else 1\n\n with tf.name_scope(opts['loss_fn']):\n total_loss = 0.\n _, gt_height, _, _ = tf.unstack(tf.shape(y))\n\n # Add individual pyramid level losses to the total loss\n for lvl in range(opts['pyr_lvls'] - opts['flow_pred_lvl'] + 1):\n _, lvl_height, lvl_width, _ = tf.unstack(tf.shape(y_hat_pyr[lvl]))\n\n # Scale the full-size groundtruth to the correct lower res level\n scaled_flow_gt = tf.image.resize_bilinear(y, (lvl_height, lvl_width))\n scaled_flow_gt /= tf.cast(gt_height / lvl_height, dtype=tf.float32)\n\n # Compute the norm of the difference between scaled groundtruth and prediction\n if opts['use_mixed_precision'] is False:\n y_hat_pyr_lvl = y_hat_pyr[lvl]\n else:\n y_hat_pyr_lvl = tf.cast(y_hat_pyr[lvl], dtype=tf.float32)\n norm = tf.norm(scaled_flow_gt - y_hat_pyr_lvl, ord=norm_order, axis=3)\n level_loss = tf.reduce_mean(tf.reduce_sum(norm, axis=(1, 2)))\n\n # Scale total loss contribution of the loss at each individual level\n total_loss += opts['alphas'][lvl] * tf.pow(level_loss + opts['epsilon'], opts['q'])\n\n return total_loss",
"def _discriminator_loss(self, y, y_hat):\n\n l1 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.ones(tf.shape(y)),logits = y)\n l2 = tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat)\n l = tf.reduce_mean(l1+l2)\n print('_discriminator_loss shape,', tf.shape(l))\n return l",
"def masked_bilinearsigmoid_cross_entropy(preds, labels, mask, negative_mask):\r\n\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)\r\n mask += negative_mask\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n # mask /= tf.reduce_mean(mask)\r\n mask = tf.reshape(mask, shape=[79924])\r\n loss *= mask\r\n return tf.reduce_mean(loss)",
"def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer"
]
| [
"0.62096536",
"0.6175731",
"0.61632735",
"0.61611694",
"0.5887584",
"0.588355",
"0.5860502",
"0.5847948",
"0.5832571",
"0.5807852",
"0.5803117",
"0.5799957",
"0.5775149",
"0.5761061",
"0.57461977",
"0.57444036",
"0.57373565",
"0.57236624",
"0.57219243",
"0.5694064",
"0.56818753",
"0.5679184",
"0.5670833",
"0.5670602",
"0.5668295",
"0.5656638",
"0.56507146",
"0.562358",
"0.5620189",
"0.56187844"
]
| 0.7869807 | 0 |
x is the training data tensor (no embedding); returns the mask to use for negating the padding effect on the attention; add this mask before taking the softmax! | def attention_mask(x):
mask = torch.zeros(len(x), len(x[0]))
for i in range(len(x)):
try:
index = np.where(x[i]==1)[0][0]
mask[i][index:] = -np.inf
except:
pass
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def softmax_masked(x, mask=None):\n if mask is None:\n sm = K.softmax(x)\n else:\n casted_mask = K.cast(mask, dtype=x.dtype)\n # subtract min first so that, after masking, the masked elements are the smallest\n z = (x - K.min(x, axis=1, keepdims=True)) * casted_mask\n # Now subtract max of non-masked elements, and 0 for masked, in order\n # to prevent exponentiating a very large number\n e = K.exp(z - K.max(z, axis=1, keepdims=True)) * casted_mask\n s = K.sum(e, axis=1, keepdims=True) + K.epsilon()\n sm = e/s\n return sm",
"def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer",
"def call(self, x, mask=None):\n u_i = K.tanh(K.dot(x, self.W) + self.b)\n v = K.squeeze(K.dot(u_i, K.expand_dims(self.u)), axis=-1)\n attention = softmax_masked(v, mask)\n\n return attention",
"def channel_padding(x):\n #keras.backend.concatenate([x, tf.zeros_like(x)], axis=-1)\n x0=keras.layers.Activation('sigmoid')(x)\n return keras.backend.concatenate([x, x0], axis=-1)",
"def _get_mask(self, x):\n x_mask = Variable(torch.zeros(x.size(0), self.max_seq_len).byte())\n return x_mask.cuda() if self.use_cuda else x_mask",
"def forward(self, x, x_mask):\n x_flat = x.view(-1, x.size(-1))\n scores = self.linear(x_flat).view(x.size(0), x.size(1))\n scores.data.masked_fill_(x_mask.data, -float('inf'))\n alpha = F.softmax(scores)\n return alpha",
"def masked_softmax(X, valid_lens):\n # `X`: 3D tensor, `valid_lens`: 1D or 2D tensor\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n # On the last axis, replace masked elements with a very large negative\n # value, whose exponentiation outputs 0\n X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens,\n value=-1e6)\n return nn.functional.softmax(X.reshape(shape), dim=-1)",
"def masked_softmax(X, valid_lens):\n # `X`: 3D tensor, `valid_lens`: 1D or 2D tensor\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n # On the last axis, replace masked elements with a very large negative\n # value, whose exponentiation outputs 0\n X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens,\n value=-1e6)\n return nn.functional.softmax(X.reshape(shape), dim=-1)",
"def predict(self, x):\n if self.training:\n self.eval()\n\n with torch.no_grad():\n output = self.forward(x)\n\n if self.classes > 1:\n probs = torch.softmax(output, dim=1)\n else:\n probs = torch.sigmoid(output)\n\n probs = probs.squeeze(0)\n tf = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.Resize(x.size[1]),\n transforms.ToTensor()\n ]\n )\n full_mask = tf(probs.cpu()) \n\n return full_mask",
"def mask_out(self, x, lengths):\n params = self.params\n slen, bs = x.size()\n\n # define target words to predict\n if params.sample_alpha == 0:\n pred_mask = np.random.rand(slen, bs) <= params.word_pred\n pred_mask = torch.from_numpy(pred_mask.astype(np.uint8))\n else:\n x_prob = params.mask_scores[x.flatten()]\n n_tgt = math.ceil(params.word_pred * slen * bs)\n tgt_ids = np.random.choice(len(x_prob), n_tgt, replace=False, p=x_prob / x_prob.sum())\n pred_mask = torch.zeros(slen * bs, dtype=torch.uint8)\n pred_mask[tgt_ids] = 1\n pred_mask = pred_mask.view(slen, bs)\n\n # do not predict padding\n pred_mask[x == params.pad_index] = 0\n pred_mask[0] = 0 # TODO: remove\n\n # mask a number of words == 0 [8] (faster with fp16)\n if params.fp16:\n pred_mask = pred_mask.view(-1)\n n1 = pred_mask.sum().item()\n n2 = max(n1 % 8, 8 * (n1 // 8))\n if n2 != n1:\n pred_mask[torch.nonzero(pred_mask).view(-1)[:n1 - n2]] = 0\n pred_mask = pred_mask.view(slen, bs)\n # assert pred_mask.sum().item() % 8 == 0\n\n # generate possible targets / update x input\n pred_mask = pred_mask.bool()\n _x_real = x[pred_mask]\n if len(_x_real) == 0:\n pred_mask[0, 0] = 1\n _x_real = x[pred_mask]\n _x_rand = _x_real.clone().random_(params.n_words)\n _x_mask = _x_real.clone().fill_(params.mask_index)\n probs = torch.multinomial(params.pred_probs, len(_x_real), replacement=True)\n _x = _x_mask * (probs == 0).long() + _x_real * (probs == 1).long() + _x_rand * (probs == 2).long()\n x = x.masked_scatter(pred_mask, _x)\n\n assert 0 <= x.min() <= x.max() < params.n_words\n assert x.size() == (slen, bs)\n assert pred_mask.size() == (slen, bs)\n\n return x, _x_real, pred_mask",
"def softmax_wt_mask(value, mask):\n numerator = tf.exp(value) * mask\n sum_ = tf.reduce_sum(numerator, 1, keep_dims=True)\n return numerator / sum_ # broadcastable",
"def get_threshold_mask(hparams, x):\n\n axis = list(range(1, x.shape.ndims))\n min_val = tf.reduce_min(x, axis=axis, keepdims=True)\n max_val = tf.reduce_max(x, axis=axis, keepdims=True)\n thresh = min_val + hparams.threshold_factor * (max_val - min_val)\n cond = tf.less(x, thresh)\n return tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))",
"def softmax(x):\r\n sum_c = np.sum(np.exp(x), axis=1)\r\n sum_c = np.expand_dims(sum_c, axis=1)\r\n pred_x = np.divide(np.exp(x), sum_c)\r\n return pred_x",
"def softmax(x):\r\n e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))\r\n return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference\r",
"def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask",
"def masked_attention_q(e, padding_mask):\n attn_dist = nn_ops.softmax(e)+1e-8 # take softmax. shape (batch_size, attn_length)\n attn_dist *= padding_mask # apply mask\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n return attn_dist / tf.reshape(masked_sums, [-1, 1]) # re-normalize",
"def masked_softmax(logits, mask, dim):\n exp_mask = (1 - tf.cast(mask, 'float')) * (-1e30) # -large where there's padding, 0 elsewhere\n masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large\n prob_dist = tf.nn.softmax(masked_logits, dim)\n return masked_logits, prob_dist",
"def masked_softmax(logits, mask, dim):\n exp_mask = (1 - tf.cast(mask, 'float')) * (-1e30) # -large where there's padding, 0 elsewhere\n masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large\n prob_dist = tf.nn.softmax(masked_logits, dim)\n return masked_logits, prob_dist",
"def forward(self, x: Tensor, mask: Tensor) -> Tensor:\n x_norm = self.layer_norm(x)\n h = self.src_src_att(x_norm, x_norm, x_norm, mask)\n h = self.dropout(h) + x\n o = self.feed_forward(h)\n return o",
"def compute_attention_mask(x_mask, mem_mask, x_word_dim, key_word_dim):\r\n if x_mask is None and mem_mask is None:\r\n return None\r\n elif x_mask is None or mem_mask is None:\r\n raise NotImplementedError()\r\n\r\n x_mask = tf.cast(x_mask,dtype=bool)\r\n mem_mask = tf.cast(tf.transpose(mem_mask,perm=[0,2,1]), dtype=bool)\r\n join_mask = tf.logical_and(x_mask, mem_mask)\r\n return join_mask",
"def masked_softmax(tensor, mask):\n tensor_shape = tensor.size()\n reshaped_tensor = tensor.view(-1, tensor_shape[-1])\n\n # Reshape the mask so it matches the size of the input tensor.\n while mask.dim() < tensor.dim():\n mask = mask.unsqueeze(1)\n mask = mask.expand_as(tensor).contiguous().float()\n reshaped_mask = mask.view(-1, mask.size()[-1])\n\n result = nn.functional.softmax(reshaped_tensor * reshaped_mask, dim=-1)\n result = result * reshaped_mask\n # 1e-13 is added to avoid divisions by zero.\n result = result / (result.sum(dim=-1, keepdim=True) + 1e-13)\n\n return result.view(*tensor_shape)",
"def forward(self, x, x_mask):\n # No padding necessary.\n if x_mask.data.sum() == 0:\n return self._forward_unpadded(x, x_mask)\n # Pad if we care or if its during eval.\n if self.padding or not self.training:\n return self._forward_padded(x, x_mask)\n # We don't care.\n return self._forward_unpadded(x, x_mask)",
"def forward(self, x: Tensor) -> Tensor:\n x1 = x[:, 0]\n x2 = x[:, 1]\n features1, features2 = self.encoder(x1), self.encoder(x2)\n features = [features2[i] - features1[i] for i in range(1, len(features1))]\n features.insert(0, features2[0])\n decoder_output = self.decoder(*features)\n masks: Tensor = self.segmentation_head(decoder_output)\n return masks",
"def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n #print('encoder')\n #print(x.shape)\n return self.sublayer[1](x, self.feed_forward)",
"def masked_softmax(scores, mask):\r\n numerator = tf.exp(tf.subtract(scores, tf.reduce_max(scores, 1, keep_dims=True))) * mask\r\n denominator = tf.reduce_sum(numerator, 1, keep_dims=True)\r\n weights = tf.div(numerator, denominator)\r\n return weights",
"def mask_logits(x, mask):\n return x + -1e30 * (1 - mask)",
"def _softmax(x):\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n s = K.sum(e, axis=-1, keepdims=True)\n return e / s",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask",
"def forward(self, x: Tensor) -> Tensor:\n x1 = x[:, 0]\n x2 = x[:, 1]\n features1, features2 = self.encoder(x1), self.encoder(x2)\n features = [\n torch.cat([features2[i], features1[i]], dim=1)\n for i in range(1, len(features1))\n ]\n features.insert(0, features2[0])\n decoder_output = self.decoder(*features)\n masks: Tensor = self.segmentation_head(decoder_output)\n return masks"
]
| [
"0.7421151",
"0.7217545",
"0.7167106",
"0.7079211",
"0.6706677",
"0.6682437",
"0.66159326",
"0.6599417",
"0.65964437",
"0.65885895",
"0.6551673",
"0.6500141",
"0.6487599",
"0.6473447",
"0.6453691",
"0.64491236",
"0.640918",
"0.6409067",
"0.6409067",
"0.63933337",
"0.63802826",
"0.63471967",
"0.63454926",
"0.63339376",
"0.6333171",
"0.63285935",
"0.6273352",
"0.6258364",
"0.62422335",
"0.62332374"
]
| 0.7250042 | 1 |
Remove security zone if it exists and is not in use | def sec_zone_absent(module, session, endpoint, my_sz):
if not my_sz:
return True, False, {'label': '',
'id': '',
'msg': 'security-zone does not exist'}
if not module.check_mode:
aos_delete(session, endpoint, my_sz['id'])
return True, True, my_sz
return True, False, my_sz | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self):\n\n self.call(method='removeZoneRecord', args=[self.domainname, self.subdomain, self.record_id])",
"def SecurityZone(self) -> _n_6_t_7:",
"def SecurityZone(self) -> _n_6_t_7:",
"def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)",
"def _remove_sandbox(self):\n if self._sandbox.exists():\n shutil.rmtree(str(self._sandbox), ignore_errors=True)",
"def remove_grass_wkspc(gisdbase):\r\n if os.path.exists(gisdbase):\r\n try:\r\n shutil.rmtree(gisdbase, True)\r\n except OSError:\r\n return False\r\n return True",
"def delete_this_region(self):",
"def test_destroy_container_privilege(self):\n pass",
"def remove(name: Optional[str], interactive: bool):\n exists = interactive or name\n\n if not exists:\n return utils.print_help_msg(remove)\n\n if name and interactive:\n return console.print(\"Cannot use both flags at the same time.:x:\")\n\n if name:\n utils.validate_timezone(name)\n\n utils.remove_timezone(interactive, name)",
"def unshare_from_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.remove_shared(self)",
"def remove( self, zone ):\n if zone.space is None:\n raise KeyError( \"zone not in space octree!\" )\n\n # remove zone from space node's contained set\n zone.space.zones.remove( zone )\n\n # set zone's containing space to none\n zone.space = None",
"def remove_ssm(self, ssm_image):\n pass",
"def remove_basic(ctx, global_ip, local_ip):\n\n # Verify the ip address format \n if is_valid_ipv4_address(local_ip) is False:\n ctx.fail(\"Given local ip address {} is invalid. Please enter a valid local ip address !!\".format(local_ip))\n\n if is_valid_ipv4_address(global_ip) is False:\n ctx.fail(\"Given global ip address {} is invalid. Please enter a valid global ip address !!\".format(global_ip))\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = 'STATIC_NAT'\n key = global_ip\n dataKey = 'local_ip'\n \n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey] == local_ip:\n config_db.set_entry(table, key, None)\n entryFound = True\n\n if entryFound is False:\n click.echo(\"Trying to delete static nat entry, which is not present.\")",
"def remove_fc_zone(connection_info):\n if connection_info:\n vol_type = connection_info.get('driver_volume_type', None)\n if vol_type == 'fibre_channel':\n if connection_info['data'].get('initiator_target_map'):\n zm = create_zone_manager()\n if zm:\n LOG.debug(\"remove_fc_zone connection info: %(conninfo)s.\",\n {'conninfo': connection_info})\n zm.delete_connection(connection_info)",
"def del_reservation(self, src, dst):\n\n # PART 1, TASK 4.1 remove the reservation from the switch, controller and update links capacities.",
"def remove_host_from_zone(self, host, zone):\n _agg = self.verify_zone_exists(zone)\n if not _agg:\n raise NovaNoSuchZone(zone)\n if not self.verify_host_exists(host):\n raise NovaNoSuchHost(host)\n if not self.verify_host_in_zone(host, zone):\n raise NovaHostNotInZone(host, zone)\n _agg.remove_host(host)\n if not zone in self.get_host_zones(host):\n return True\n else:\n print\"Seems like host %s was not added to zone %s\" % (host, zone)\n return False",
"def remove_ca_certs_from_systemwide_ca_store(self):\n\n raise NotImplementedError()",
"def pre_security_group_delete(self, resource_id):\n pass",
"def __removeBackup(self):\n pass #FIXME!!",
"def remove():",
"def purge_files(zone_id, zone_name, files):\n cf = CloudFlare.CloudFlare()\n urls = normalize_urls(zone_name, files)\n click.echo(urls)\n return cf.zones.purge_cache.delete(zone_id, data={'files': urls})",
"def vpc_cleanup(vpcid):\n if not vpcid:\n return\n print('Removing VPC ({}) from AWS'.format(vpcid))\n ec2 = boto3.resource('ec2')\n ec2client = ec2.meta.client\n vpc = ec2.Vpc(vpcid)\n\n os.system('cls' if os.name == 'nt' else 'clear') \n print('#######################################################')\n print('# Hold Onto Your Hat Deletion In Progress........... #')\n print('######################################################')\n # delete any instances\n for subnet in vpc.subnets.all():\n print('Termination In Progress')\n for instance in subnet.instances.all():\n instance.terminate()\n print('Please Wait')\n instance.wait_until_terminated()\n print('Instances Gone')\n\n # delete our security groups\n for sg in vpc.security_groups.all():\n if sg.group_name != 'default':\n sg.delete()\n print('Security Groups Gone')\n\n filter=[{\"Name\": \"vpc-id\", \"Values\": [ vpcid ]}]\n natg = client.describe_nat_gateways(Filter=filter)['NatGateways']\n for nat in natg:\n if not (nat['State'] in [\"deleted\",\"deleting\"]):\n # if not (nat['State'] in [\"deleted\",\"deleting\"]):\n print(\"Deleting NAT gateway {}\".format(nat['NatGatewayId']))\n try:\n client.delete_nat_gateway(NatGatewayId=nat['NatGatewayId'])\n waiter = client.get_waiter('nat_gateway_available')\n waiter.wait(Filters=[\n {\n 'Name': 'state',\n 'Values': \n [\n 'deleted',\n ]\n },{\n 'Name': 'nat-gateway-id',\n 'Values': [\n nat['NatGatewayId'],\n ]\n },\n ])\n except:\n pass\n\n # detach and delete all gateways associated with the vpc\n for gw in vpc.internet_gateways.all():\n vpc.detach_internet_gateway(InternetGatewayId=gw.id)\n gw.delete()\n print(\"Internet Gateway Gone\")\n\n\n rtl = vpc.route_tables.all()\n count = sum(1 for _ in rtl)\n while count > 1:\n\n #delete all route table associations\n for rt in rtl:\n for rta in rt.associations:\n if not rta.main:\n print(\"Deleting route table associations\")\n rta.delete()\n\n for r in rt.routes:\n try:\n x = r.delete()\n print(\" Route Deleted\")\n # print(x)\n except:\n pass\n try:\n rt.delete()\n print(\"Table Deleted\")\n except:\n pass\n \n rtl = vpc.route_tables.all()\n count = sum(1 for _ in rtl)\n\n print('Route Tables Gone')\n\n # delete our endpoints\n for ep in ec2client.describe_vpc_endpoints(Filters=[{\n 'Name': 'vpc-id',\n 'Values': [ vpcid ]\n }])['VpcEndpoints']:\n ec2client.delete_vpc_endpoints(VpcEndpointIds=[ep['VpcEndpointId']])\n print('Endpoints Gone')\n\n\n # delete any vpc peering connections\n for vpcpeer in ec2client.describe_vpc_peering_connections(Filters=[{\n 'Name': 'requester-vpc-info.vpc-id',\n 'Values': [ vpcid ]\n }] )['VpcPeeringConnections']:\n ec2.VpcPeeringConnection(vpcpeer['VpcPeeringConnectionId']).delete()\n print('VPC Connections Gone')\n\n # delete non-default network acls\n for netacl in vpc.network_acls.all():\n if not netacl.is_default:\n netacl.delete()\n\n # delete network interfaces\n for subnet in vpc.subnets.all():\n for interface in subnet.network_interfaces.all():\n interface.delete()\n subnet.delete()\n print('Network Interfaces Gone')\n\n # finally, delete the vpc\n ec2client.delete_vpc(VpcId=vpcid)\n print('##############################')\n print('#### My Work Here Is Done ####')\n print('##############################')",
"def check_exists(name):\n if arcpy.Exists(name):\n arcpy.Delete_management(name)\n return",
"def remove_secgroup(self, name=None):\n if self.cloudman:\n self.cloudman.network.delete_security_group(name)\n g = self.list_secgroups(name=name)\n return len(g) == 0\n else:\n raise ValueError(\"cloud not initialized\")",
"def sec_zone_present(module, session, endpoint, my_sz, vni_id, vlan_id):\n margs = module.params\n\n if not my_sz:\n\n if 'name' not in margs.keys():\n return False, False, {\"msg\": \"name required to create a new \"\n \"security-zone\"}\n\n new_sz = {\"sz_type\": \"evpn\",\n \"label\": margs['name'],\n \"vrf_name\": margs['name']}\n\n if vni_id:\n new_sz[\"vni_id\"] = vni_id\n\n if vlan_id:\n new_sz[\"vlan_id\"] = vlan_id\n\n if not module.check_mode:\n resp = aos_post(session, endpoint, new_sz)\n\n new_sz['id'] = resp['id']\n\n return True, True, new_sz\n\n return True, False, new_sz\n\n else:\n if vni_id or vlan_id:\n\n endpoint_put = \"{}/{}\".format(endpoint, my_sz['id'])\n\n new_sz = {\"sz_type\": \"evpn\",\n \"label\": my_sz['label'],\n \"vrf_name\": my_sz['vrf_name'],\n \"id\": my_sz['id']}\n\n if vni_id:\n new_sz[\"vni_id\"] = vni_id\n\n if vlan_id:\n new_sz[\"vlan_id\"] = vlan_id\n\n if not module.check_mode:\n aos_put(session, endpoint_put, new_sz)\n\n return True, True, new_sz\n\n return True, False, new_sz\n\n return True, False, my_sz",
"def remove_zonerecord(self, record_id=None, remove_all=False):\n\n if record_id:\n r = ZoneRecord(domainname=self.domainname, subdomain=self.subdomain, record_id=record_id)\n r.remove()\n elif remove_all:\n for r in self.get_zonerecords():\n r.remove()",
"def shiva_the_destroyer():\n with settings(warn_only=True):\n run('rm -Rf %(path)s' % env)\n run('rm -Rf %(log_path)s' % env)\n sudo('rm %(apache_config_path)s' % env)\n reboot()",
"def test_aws_service_api_vm_security_group_delete(self):\n pass",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def pre_subnet_delete(self, resource_id):\n pass"
]
| [
"0.5807829",
"0.5751043",
"0.5751043",
"0.5531846",
"0.55279076",
"0.5481809",
"0.5456523",
"0.54271185",
"0.53969085",
"0.5374144",
"0.53658533",
"0.53476596",
"0.5314556",
"0.529601",
"0.5249035",
"0.52129596",
"0.5201034",
"0.51905715",
"0.5188096",
"0.51529765",
"0.5135171",
"0.51202476",
"0.5110591",
"0.5106193",
"0.5096016",
"0.5081732",
"0.50739294",
"0.5073061",
"0.5065995",
"0.5064672"
]
| 0.70903176 | 0 |
Create new security zone or modify existing pool | def sec_zone_present(module, session, endpoint, my_sz, vni_id, vlan_id):
margs = module.params
if not my_sz:
if 'name' not in margs.keys():
return False, False, {"msg": "name required to create a new "
"security-zone"}
new_sz = {"sz_type": "evpn",
"label": margs['name'],
"vrf_name": margs['name']}
if vni_id:
new_sz["vni_id"] = vni_id
if vlan_id:
new_sz["vlan_id"] = vlan_id
if not module.check_mode:
resp = aos_post(session, endpoint, new_sz)
new_sz['id'] = resp['id']
return True, True, new_sz
return True, False, new_sz
else:
if vni_id or vlan_id:
endpoint_put = "{}/{}".format(endpoint, my_sz['id'])
new_sz = {"sz_type": "evpn",
"label": my_sz['label'],
"vrf_name": my_sz['vrf_name'],
"id": my_sz['id']}
if vni_id:
new_sz["vni_id"] = vni_id
if vlan_id:
new_sz["vlan_id"] = vlan_id
if not module.check_mode:
aos_put(session, endpoint_put, new_sz)
return True, True, new_sz
return True, False, new_sz
return True, False, my_sz | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)",
"def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result",
"def pool_create(self, pool_name):\n self.core.api.os.shell.cmd('{0} add apppool /name:\"{1}\"'.format(\n self.APP_CMD, pool_name\n ))",
"def pre_floating_ip_pool_create(self, resource_dict):\n pass",
"def create():\n\n # remember what is created or not\n vpc = False\n igw = False\n sg = False\n sub = False\n vm = False\n\n vpc = _create_resource('vpc', CidrBlock=args.cidr, InstanceTenancy='default')\n igw = _create_resource('igw')\n\n if vpc and igw:\n _attach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n else:\n print('Cannot attach an igw to a vpc as at least one of them could not be created.')\n\n if vpc:\n sg = _create_resource(\n 'sg',\n GroupName=args.role,\n Description='SG for ' + args.role,\n VpcId=getattr(_existing.vpc, 'id', None)\n )\n else:\n print('Cannot create a sg as the vpc to attach it to could not be created.')\n\n if sg:\n _add_ingress_rules()\n else:\n print('Cannot create ingress rule as the sg could not be created.')\n\n if vpc:\n sub = _create_resource(\n 'sub',\n VpcId=getattr(_existing.vpc, 'id', None),\n CidrBlock=args.cidr\n )\n else:\n print('Cannot create a subnet as the vpc to attach it to could not be created.')\n\n if vpc and sub:\n _link_route_table()\n else:\n print('Cannot link subnet and VPC in the route table as vpc or sub not created.')\n\n if sub and sg:\n vm = _create_resource(\n 'vm',\n ImageId=args.ami,\n MinCount=1,\n MaxCount=1,\n KeyName=args.keypair,\n InstanceType=args.instance,\n # Note that there will be no internal name.\n # To get one, create first a DHCP options set and associate it with the VPC.\n NetworkInterfaces=[{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0, # needs to be 0 to get a public IP\n 'SubnetId': getattr(_existing.sub, 'id', None),\n 'Groups': [getattr(_existing.sg, 'id', None)],\n }],\n )\n else:\n print('Cannot create an instance as the sub or sg to use could not be created.')\n\n if vm:\n if not dry:\n print('Waiting for the instance to be up and running, usually done in less than 45 seconds...')\n _existing.vm.wait_until_running()\n _tag_volume()\n print('you can reach your VM at ' + _existing.vm.public_ip_address)\n\n else:\n print('VM not created for some reason.')",
"def post_floating_ip_pool_create(self, resource_dict):\n pass",
"def SecurityZone(self) -> _n_6_t_7:",
"def SecurityZone(self) -> _n_6_t_7:",
"def test_aws_service_api_vm_security_group_put(self):\n pass",
"def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)",
"def pre_subnet_create(self, resource_dict):\n pass",
"def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool",
"def pre_instance_ip_create(self, resource_dict):\n pass",
"def copy_to_region(self, region, name=None):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n sg = rconn.create_security_group(name or self.name, self.description)\r\n source_groups = []\r\n for rule in self.rules:\r\n grant = rule.grants[0]\r\n for grant in rule.grants:\r\n if grant.name:\r\n if grant.name not in source_groups:\r\n source_groups.append(grant.name)\r\n sg.authorize(None, None, None, None, grant)\r\n else:\r\n sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,\r\n grant.cidr_ip)\r\n return sg",
"def post_security_group_create(self, resource_dict):\n pass",
"def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. 
Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def post_instance_ip_create(self, resource_dict):\n pass",
"def create_ipsecpolicy(self, body=None):\r\n return self.post(self.ipsecpolicies_path, body=body)",
"def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)",
"def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)",
"def pre_security_group_create(self, resource_dict):\n pass",
"def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True",
"def create_vlan_pool(self, vlan_pool_name, allocation_mode):\n VlanInstP_mo = VlanInstP('uni/infra/', vlan_pool_name, allocation_mode)\n self.commit(VlanInstP_mo)\n return VlanInstP_mo",
"def create(subnetModeDetails):\n\n # Remove id as it's created automatically\n if 'id' in subnetModeDetails:\n del subnetModeDetails['id']\n\n schema = SubnetModeSchema()\n new_subnetMode = schema.load(subnetModeDetails, session=db.session)\n db.session.add(new_subnetMode)\n db.session.commit()\n\n # Serialize and return the newly created deployment\n # in the response\n data = schema.dump(new_subnetMode)\n return data, 201",
"def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)",
"def _activate_new_zone(self):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n success = self._api.domain.zone.version.set(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n if not success:\n raise GandiApiException('Failed to activate new zone;')\n else:\n logging.info('New zone version activated.')",
"def post_subnet_create(self, resource_dict):\n pass",
"def pre_loadbalancer_pool_create(self, resource_dict):\n pass",
"def create_or_fetch_vpc(self, region, zone):\n\n # refresh client region scope if region changed.\n if self.region and self.region != region:\n self.vpc_client = ibm.client(region=region)\n self.region = region\n self.zone = zone\n reused_vpc_data = None\n # pylint: disable=line-too-long\n vpcs_filtered_by_tags_and_region = self.search_client.search(\n query=f\"type:vpc AND tags:{self.cluster_name} AND region:{self.region}\",\n fields=[\"tags\", \"region\", \"type\"],\n limit=1000,\n ).get_result()[\"items\"]\n for vpc in vpcs_filtered_by_tags_and_region:\n vpc_id = vpc[\"crn\"].rsplit(\":\", 1)[-1]\n vpc_data = self.get_vpc_data(vpc_id, self.region)\n if vpc_data[\"status\"] == \"available\":\n reused_vpc_data = vpc_data\n break\n # found vpc tagged with cluster name in the required region\n if reused_vpc_data:\n # using self.region since tagged vpc is in the same region\n subnets = self.get_vpc_subnets(reused_vpc_data, self.region)\n subnet_in_zone = next(\n (subnet for subnet in subnets if subnet[\"zone\"][\"name\"] == self.zone),\n None,\n )\n # found a subnet in the required zone\n if subnet_in_zone:\n subnet_id = subnet_in_zone[\"id\"]\n public_gateway = subnet_in_zone.get(\"public_gateway\")\n if not public_gateway:\n public_gateway = self.create_public_gateway(\n reused_vpc_data[\"id\"], self.zone, subnet_in_zone\n )\n # tagged vpc found doesn't have a subnet in the required zone\n else:\n subnet_data = self.create_subnet(reused_vpc_data[\"id\"], self.zone)\n subnet_id = subnet_data[\"id\"]\n public_gateway = self.create_public_gateway(\n reused_vpc_data[\"id\"], self.zone, subnet_data\n )\n\n # add missing security group rules if needed\n security_group = reused_vpc_data.get(\"default_security_group\")\n if security_group:\n sg_id = security_group[\"id\"]\n self.add_missing_sg_rules(sg_id)\n\n # managed to reuse found VPC\n logger.info(\n f\"Reusing VPC {reused_vpc_data['id']} named: {reused_vpc_data['name']}\"\n )\n return {\n \"vpc_id\": reused_vpc_data[\"id\"],\n \"subnet_id\": subnet_id,\n \"security_group_id\": sg_id,\n }\n\n # delete a tagged vpc that doesn't meet requirements\n if reused_vpc_data:\n self.delete_vpc(reused_vpc_data[\"id\"], self.region)\n # create a new vpc\n vpc_tags = self.create_vpc()\n return vpc_tags"
]
| [
"0.607716",
"0.5653735",
"0.56347424",
"0.56218237",
"0.56128204",
"0.5576644",
"0.5575975",
"0.5575975",
"0.55006844",
"0.54984695",
"0.54873765",
"0.546849",
"0.54667926",
"0.5455745",
"0.5440565",
"0.54360205",
"0.5427527",
"0.54229987",
"0.54038274",
"0.53950787",
"0.5361489",
"0.53586125",
"0.5321476",
"0.52897453",
"0.5279889",
"0.52720416",
"0.52562106",
"0.52413404",
"0.52340657",
"0.5223232"
]
| 0.60932606 | 0 |
Main function to create, change or delete security zones within an AOS blueprint | def sec_zone(module):
    margs = module.params
    endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])
    name = margs.get('name', None)
    uuid = margs.get('id', None)
    vni_id = margs.get('vni_id', None)
    vlan_id = margs.get('vlan_id', None)
    if vni_id:
        try:
            vni_id = int(vni_id)
        except ValueError:
            module.fail_json(msg="Invalid ID: must be an integer")
        errors = validate_vni_id(vni_id)
        if errors:
            module.fail_json(msg=errors)
    if vlan_id:
        try:
            vlan_id = int(vlan_id)
        except ValueError:
            module.fail_json(msg="Invalid ID: must be an integer")
        errors = validate_vlan_id(vlan_id)
        if errors:
            module.fail_json(msg=errors)
    sz_data = aos_get(margs['session'], endpoint)
    my_sz = {}
    if not uuid:
        for k, v in sz_data['items'].items():
            if v['label'] == name:
                my_sz = v
    else:
        for k, v in sz_data['items'].items():
            if v['id'] == uuid:
                my_sz = v
    if margs['state'] == 'absent':
        success, changed, results = sec_zone_absent(module, margs['session'],
                                                    endpoint, my_sz)
    elif margs['state'] == 'present':
        success, changed, results = sec_zone_present(module, margs['session'],
                                                     endpoint, my_sz, vni_id,
                                                     vlan_id)
    if success:
        module.exit_json(changed=changed, name=results['label'],
                         id=results['id'], value=results)
    else:
        module.fail_json(msg=results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sec_zone_present(module, session, endpoint, my_sz, vni_id, vlan_id):\n margs = module.params\n\n if not my_sz:\n\n if 'name' not in margs.keys():\n return False, False, {\"msg\": \"name required to create a new \"\n \"security-zone\"}\n\n new_sz = {\"sz_type\": \"evpn\",\n \"label\": margs['name'],\n \"vrf_name\": margs['name']}\n\n if vni_id:\n new_sz[\"vni_id\"] = vni_id\n\n if vlan_id:\n new_sz[\"vlan_id\"] = vlan_id\n\n if not module.check_mode:\n resp = aos_post(session, endpoint, new_sz)\n\n new_sz['id'] = resp['id']\n\n return True, True, new_sz\n\n return True, False, new_sz\n\n else:\n if vni_id or vlan_id:\n\n endpoint_put = \"{}/{}\".format(endpoint, my_sz['id'])\n\n new_sz = {\"sz_type\": \"evpn\",\n \"label\": my_sz['label'],\n \"vrf_name\": my_sz['vrf_name'],\n \"id\": my_sz['id']}\n\n if vni_id:\n new_sz[\"vni_id\"] = vni_id\n\n if vlan_id:\n new_sz[\"vlan_id\"] = vlan_id\n\n if not module.check_mode:\n aos_put(session, endpoint_put, new_sz)\n\n return True, True, new_sz\n\n return True, False, new_sz\n\n return True, False, my_sz",
"def SecurityZone(self) -> _n_6_t_7:",
"def SecurityZone(self) -> _n_6_t_7:",
"def securityzones(self, securityzone_id, data, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/securityzones/{}\".format(api_version,\n tenant_id,\n securityzone_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def Run(self, args):\n project = properties.VALUES.core.project.Get(required=True)\n zone = {}\n zone['dnsName'] = args.dns_name\n zone['name'] = args.zone\n zone['description'] = args.description\n\n really = console_io.PromptContinue('Creating %s in %s' % (zone, project))\n if not really:\n return\n\n dns = self.context['dns']\n request = dns.managedZones().create(project=project, body=zone)\n try:\n result = request.execute()\n return result\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)",
"def main():\n module = AnsibleModule(\n argument_spec=dict(\n session=dict(required=True, type='dict'),\n blueprint_id=dict(required=True,),\n name=dict(required=False),\n id=dict(required=False),\n state=dict(required=False,\n choices=['present', 'absent'],\n default=\"present\",),\n vni_id=dict(required=False),\n vlan_id=dict(required=False),\n ),\n mutually_exclusive=[('name', 'id')],\n required_one_of=[('name', 'id')],\n supports_check_mode=True\n )\n\n sec_zone(module)",
"def sec_zone_absent(module, session, endpoint, my_sz):\n if not my_sz:\n return True, False, {'label': '',\n 'id': '',\n 'msg': 'security-zone does not exist'}\n\n if not module.check_mode:\n aos_delete(session, endpoint, my_sz['id'])\n\n return True, True, my_sz\n\n return True, False, my_sz",
"def main():\n if len(sys.argv) != 5:\n print ('usage: %s <SRC_USER::SRC_PASSWD@@SRC_HOST> '\n '<DEST_USER:DEST_PASSWD@DEST_HOST> SRC_GW DEST_GW\\n'\n ' where\\n'\n ' HOST Aviatrix Controller hostname or IP\\n'\n ' USER Aviatrix Controller login username\\n'\n ' PASSWORD Aviatrix Controller login password\\n'\n ' GW name of a provisioned gateway\\n' % sys.argv[0])\n sys.exit(1)\n\n # connect to both controllers\n src_controller = get_controller_from_argument(sys.argv[1])\n dst_controller = get_controller_from_argument(sys.argv[2])\n\n # find the source gateway\n gw_name = sys.argv[3]\n src_gwy = src_controller.get_gateway_by_name('admin', gw_name)\n if not src_gwy:\n print 'Source gateway %s not found\\n' % (gw_name)\n return\n\n # find the destination gateway\n gw_name = sys.argv[4]\n dst_gwy = dst_controller.get_gateway_by_name('admin', gw_name)\n if not dst_gwy:\n print 'Destination gateway %s not found\\n' % (gw_name)\n return\n\n # clone the firewall policies and the FQDN filters\n clone_fw_rules(src_controller, src_gwy, dst_controller, dst_gwy)\n clone_fqdn_rules(src_controller, src_gwy, dst_controller, dst_gwy)",
"def main():\n config = get_config(CONFIG_FILENAME)\n print(\"Creating IAM role\")\n role = create_iam_role(config)\n print(\"Creating redshift cluster\")\n create_redshift_cluster(config, role)",
"def main():\n\n #01. Importing AWS parameters\n config = configparser.ConfigParser()\n config.read_file(open('dwh.cfg'))\n\n KEY = config.get('AWS','KEY')\n SECRET = config.get('AWS','SECRET')\n\n DB_CLUSTER_TYPE = config.get(\"CLUSTER\",\"DB_CLUSTER_TYPE\")\n DB_NUM_NODES = config.get(\"CLUSTER\",\"DB_NUM_NODES\")\n DB_NODE_TYPE = config.get(\"CLUSTER\",\"DB_NODE_TYPE\")\n\n DB_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\",\"DB_CLUSTER_IDENTIFIER\")\n DB_NAME = config.get(\"CLUSTER\",\"DB_NAME\")\n DB_USER = config.get(\"CLUSTER\",\"DB_USER\")\n DB_PASSWORD = config.get(\"CLUSTER\",\"DB_PASSWORD\")\n DB_PORT = config.get(\"CLUSTER\",\"DB_PORT\")\n\n DB_IAM_ROLE_NAME = config.get(\"CLUSTER\", \"DB_IAM_ROLE_NAME\")\n \n print(\"Creating clients for AWS Services\")\n\n #02. Creating clients for AWS Services\n ec2 = boto3.resource (\n 'ec2',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n \n s3 = boto3.resource('s3',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n iam = boto3.client (\n 'iam',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n redshift = boto3.client (\n 'redshift',\n region_name='us-west-2',\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n #03. Creating IAM role\n try:\n sparkifyRole = iam.create_role (\n Path='/',\n RoleName=DB_IAM_ROLE_NAME,\n Description='Allows Redshift clusters to call AWS Services on your behalf.',\n AssumeRolePolicyDocument=json.dumps ({\n 'Statement': [{\n 'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}\n }],\n 'Version': '2012-10-17'\n })\n )\n\n except Exception as e:\n print(e)\n\n #04. Ataching policy for IAM role\n iam.attach_role_policy (\n RoleName=DB_IAM_ROLE_NAME,\n PolicyArn='arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'\n )['ResponseMetadata']['HTTPStatusCode']\n \n roleArn = iam.get_role(RoleName=DB_IAM_ROLE_NAME)['Role']['Arn']\n print(roleArn)\n\n #05. Creating redshift cluster\n try:\n print('Creating cluster...')\n response = redshift.create_cluster( \n\n #Hardware parameters\n ClusterType=DB_CLUSTER_TYPE,\n NodeType=DB_NODE_TYPE,\n NumberOfNodes=int(DB_NUM_NODES),\n\n #Identifiers & credentials parameters\n DBName=DB_NAME,\n ClusterIdentifier=DB_CLUSTER_IDENTIFIER,\n MasterUsername=DB_USER,\n MasterUserPassword=DB_PASSWORD, \n\n #Role parameters\n IamRoles=[roleArn]\n )\n print ('Cluster will be created on redshift console.')\n \n except Exception as e:\n print(e)\n \n #06. Open an incoming TCP port to access the cluster endpoint\n myClusterProps = redshift.describe_clusters(ClusterIdentifier=DB_CLUSTER_IDENTIFIER)['Clusters'][0]\n\n try:\n print('Opening an incoming TCP port to access the cluster endpoint...')\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DB_PORT),\n ToPort=int(DB_PORT)\n )\n print('TCP port opened.')\n except Exception as e:\n print(e)",
"def mainProv(event, portId, ipAddr=''):\n\tif event == 'create':\n\t\thostName = genHostname(ipAddr)\n\t\tktabUpdate(hostName,action='create')\n\t\tdnsUpdate(portId=portId, ipAddr=ipAddr, action='create')\n\tif event == 'destroy':\n\t\thostName = dnsUpdate(portId, action='delete')\n\t\tktabUpdate(hostName, action='delete')",
"def generate_config(context):\n project = context.properties['projectId']\n zone_resource_name = context.properties['resourceName']\n\n resources = []\n\n zone_resource = {\n 'name': zone_resource_name,\n # https://cloud.google.com/dns/docs/reference/v1/managedZones\n 'type': 'gcp-types/dns-v1:managedZones',\n 'properties': {\n 'description': 'Routes googleapis.com to restricted.googleapis.com VIP',\n 'dnsName': 'googleapis.com.',\n 'project': project,\n 'visibility': 'private',\n 'privateVisibilityConfig': {\n 'kind': 'dns#managedZonePrivateVisibilityConfig',\n 'networks': [{\n 'kind': 'dns#managedZonePrivateVisibilityConfigNetwork',\n 'networkUrl': context.properties['network']\n }]\n }\n }\n }\n\n # If a dependsOn property was passed in, the network should depend on that.\n if 'dependsOn' in context.properties:\n zone_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n resources.append(zone_resource)\n\n # Configure the DNS Zone. The two additions below will create Change records which will create ResourceRecordSets.\n # This follows the structure described here: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#configuring-dns\n resources.append({\n 'name': 'cname-record',\n # https://cloud.google.com/dns/docs/reference/v1/changes/create\n 'action': 'gcp-types/dns-v1:dns.changes.create',\n 'metadata': {\n 'runtimePolicy': [\n 'CREATE',\n ],\n },\n 'properties': {\n 'project': project,\n 'managedZone': '$(ref.{}.name)'.format(zone_resource_name),\n 'additions': [{\n 'name': '*.googleapis.com.',\n 'type': 'CNAME',\n 'ttl': 300,\n 'rrdatas': [ 'restricted.googleapis.com.' ]\n }]\n }\n })\n\n resources.append({\n 'name': 'a-record',\n # https://cloud.google.com/dns/docs/reference/v1/changes/create\n 'action': 'gcp-types/dns-v1:dns.changes.create',\n 'metadata': {\n 'runtimePolicy': [\n 'CREATE',\n ],\n },\n 'properties': {\n 'project': project,\n 'managedZone': '$(ref.{}.name)'.format(zone_resource_name),\n 'additions': [{\n 'name': 'restricted.googleapis.com.',\n 'type': 'A',\n 'ttl': 300,\n 'rrdatas': [\n '199.36.153.4',\n '199.36.153.5',\n '199.36.153.6',\n '199.36.153.7'\n ]\n }]\n }\n })\n\n return {'resources': resources}",
"def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result",
"def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': {}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"",
"def zone(enum_path):\n lip = socket.gethostbyname('mygen')\n sipb = socket.gethostbyname('prv_rsa')\n sip1 = socket.gethostbyname('public8')\n #34101 Modified realmIP to endpoint IP so that SETUP/INVITE message will go through enum_realm\n h323b = socket.gethostbyname('private8')\n h323c = socket.gethostbyname('public10')\n #34101 Added due to new entry in e164.zone file\n sip2 = socket.gethostbyname('private7')\n sip3 = socket.gethostbyname('public1')\n #32363 Enum realm IP needs to passed for GK\n h323_gk = socket.gethostbyname('enum_realm')\n #43997 Modified due to change in resource\n endpointIP = socket.gethostbyname('public3')\n pub_rsa_ip = socket.gethostbyname('pub_rsa')\n pvt1_ip = socket.gethostbyname('private1')\n pvt2_ip = socket.gethostbyname('private2')\n pvt3_ip = socket.gethostbyname('private3')\n pvt5_ip = socket.gethostbyname('private5')\n\n #45529 - IPAddress of proxy will be replaced in zone file\n proxy_ip = socket.gethostbyname('ast_sipproxy')\n\n if enum_path == '/var/opt/nextest/tdb/production_components.qms/multiple_DNS.qms/':\n\n #To edit abc.zone for MDNS suite\n zone_path = enum_path + 'abc.zone'\n try:\n os.system('sudo cp %s /var/lib/named/abc.zone' %zone_path )\n os.system('sudo chown -R test:users /var/lib/named/abc.zone') \n except Exception, e:\n msg = \"file error: %s\" % str(e)\n log.error(\"%s\" %str(msg))\n \n try:\n zfile=open('/var/lib/named/abc.zone',\"r\")\n zList = zfile.readlines()\n zfile.close()\n \n for i in zList:\n if i.__contains__('private1_ip'):\n str1 = i.replace('private1_ip',pvt1_ip)\n ind = zList.index(i) \n zList[ind] = str1\n elif i.__contains__('private2_ip'):\n str1 = i.replace('private2_ip',pvt2_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('private3_ip'):\n str1 = i.replace('private3_ip',pvt3_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('private5_ip'):\n str1 = i.replace('private5_ip',pvt5_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('pub_rsa'):\n str1 = i.replace('pub_rsa',pub_rsa_ip)\n ind = zList.index(i)\n zList[ind] = str1\n \n zfile = open('/var/lib/named/abc.zone',\"w\")\n zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n log.error('File /production_components.qms/multiple_DNS.qms/abc.zone does not exist %s' %str(msg))\n\n elif enum_path == '/var/opt/nextest/tdb/production_components.qms/bt_feature.qms/sipoptionchanges.qms/':\n\n zone_path = enum_path + 'e164.zone'\n try:\n os.system('sudo cp %s /var/lib/named/e164.zone' %zone_path )\n os.system('sudo chown -R test:users /var/lib/named/e164.zone')\n except Exception, e:\n msg = \"file error: %s\"%str(e)\n log.error(\"%s\"%str(msg))\n\n try:\t \n # Get IPs\n sipopt_proxy_ip = socket.gethostbyname('sipproxy')\n sipopt_boston_ip = socket.gethostbyname('boston')\n sipopt_portland_ip = socket.gethostbyname('portland')\n sipopt_maryland_ip = socket.gethostbyname('maryland')\n sipopt_portland2_ip = socket.gethostbyname('public1')\n\n zfile = open('/var/lib/named/e164.zone',\"r\")\n zList = zfile.readlines()\n zfile.close()\n\n # Replace IPs\n for i in zList:\n if i.__contains__('SIPPROXY_IP'):\n str1 = i.replace('SIPPROXY_IP',sipopt_proxy_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('PHONE_BOSTON'):\n str1 = i.replace('PHONE_BOSTON',sipopt_boston_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('IP_PORTLAND'):\n str1 = i.replace('IP_PORTLAND',sipopt_portland_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif 
i.__contains__('IP_MARYLAND'):\n str1 = i.replace('IP_MARYLAND',sipopt_maryland_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('IP2_PORTLAND'):\n str1 = i.replace('IP2_PORTLAND',sipopt_portland2_ip)\n ind = zList.index(i)\n zList[ind] = str1\n \n zfile = open('/var/lib/named/e164.zone',\"w\")\n zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n\t msg = \"file error: %s\" % str(e)\n log.error(\"File /production_components.qms/bt_feature.qms/sipoptionchanges.qms/e164.zone does not exist %s\"%str(msg))\n \n else:\n\n zone_path = enum_path + 'e164.zone'\n #31291 - Included sudo to copy the Zone file\n try:\n os.system('sudo cp %s /var/lib/named/e164.zone' %zone_path )\n #32363 - Done changes to change the ownership\n os.system('sudo chown -R test:users /var/lib/named/e164.zone') \n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #32363 Modified to resolve string formatting error\n log.error(\"%s\" %str(msg))\n \n try:\n zfile=open('/var/lib/named/e164.zone',\"r\")\n zList = zfile.readlines()\n zfile.close()\n \n for i in zList:\n if i.__contains__('localip'):\n str1 = i.replace('localip',lip)\n ind = zList.index(i) \n zList[ind] = str1\n elif i.__contains__('sip1ip'):\n str1 = i.replace('sip1ip',sip1)\n ind = zList.index(i)\n zList[ind] = str1\n #34101 - Added this code due to new entry in e164.Zone file\n elif i.__contains__('sip2ip'):\n str1 = i.replace('sip2ip',sip2)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('sip3ip'):\n str1 = i.replace('sip3ip',sip3)\n ind = zList.index(i)\n zList[ind] = str1 \n elif i.__contains__('sipbip'):\n str1 = i.replace('sipbip',sipb)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('h323bip'):\n str1 = i.replace('h323bip',h323b)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('h323cip'):\n str1 = i.replace('h323cip',h323c)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('h323gkip'):\n str1 = i.replace('h323gkip',h323_gk)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('endpointIP'):\n str1 = i.replace('endpointIP',endpointIP)\n ind = zList.index(i)\n zList[ind] = str1\n #45529\n elif i.__contains__('proxyip'):\n str1 = i.replace('proxyip',proxy_ip)\n ind = zList.index(i)\n zList[ind] = str1\n #143160\n elif i.__contains__('private1_ip'):\n str1 = i.replace('private1_ip',pvt1_ip)\n ind = zList.index(i)\n zList[ind] = str1\n elif i.__contains__('pub_rsa'):\n str1 = i.replace('pub_rsa',pub_rsa_ip)\n ind = zList.index(i)\n zList[ind] = str1\n \n \n zfile = open('/var/lib/named/e164.zone',\"w\")\n zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #31291 - Changed the path of the file\n #32363 Modified to resolve string formatting error\n log.error('File /production_components.qms/enum.qms/e164.zone does not exist %s' %str(msg))",
"def configureEFS(self):\n methodName = \"configureEFS\"\n \n TR.info(methodName,\"STARTED configuration of EFS\")\n # Create the EFS provisioner service account\n\n \"\"\"\n oc create -f efs-configmap.yaml -n default\n oc create serviceaccount efs-provisioner\n oc create -f efs-rbac-template.yaml\n oc create -f efs-storageclass.yaml\n oc create -f efs-provisioner.yaml\n oc create -f efs-pvc.yaml\n \"\"\"\n \n # self.updateTemplateFile(workerocs,'${az1}', self.zones[0])\n self.updateTemplateFile(\"/ibm/templates/efs/efs-configmap.yaml\",'${file-system-id}',self.EFSID)\n self.updateTemplateFile(\"/ibm/templates/efs/efs-configmap.yaml\",'${aws-region}',self.region)\n self.updateTemplateFile(\"/ibm/templates/efs/efs-configmap.yaml\",'${efsdnsname}',self.EFSDNSName)\n\n self.updateTemplateFile(\"/ibm/templates/efs/efs-provisioner.yaml\",'${file-system-id}',self.EFSID)\n self.updateTemplateFile(\"/ibm/templates/efs/efs-provisioner.yaml\",'${aws-region}',self.region)\n\n TR.info(methodName,\"Invoking: oc create -f efs-configmap.yaml -n default\")\n cm_cmd = \"oc create -f /ibm/templates/efs/efs-configmap.yaml -n default\"\n retcode = call(cm_cmd, shell=True)\n if (retcode != 0):\n TR.info(methodName,\"Invoking: oc create -f efs-configmap.yaml -n default %s\" %retcode)\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf\n\n TR.info(methodName,\"Invoking: oc create serviceaccount efs-provisioner\")\n sa_cmd = \"oc create serviceaccount efs-provisioner\"\n retcode = call(sa_cmd, shell=True)\n if (retcode != 0):\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf\n\n TR.info(methodName,\"Invoking: oc create -f efs-rbac-template.yaml\")\n rbac_cmd = \"oc create -f /ibm/templates/efs/efs-rbac-template.yaml\"\n retcode = call(rbac_cmd, shell=True)\n if (retcode != 0):\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf\n\n TR.info(methodName,\"Invoking: oc create -f efs-storageclass.yaml\")\n sc_cmd = \"oc create -f /ibm/templates/efs/efs-storageclass.yaml\"\n retcode = call(sc_cmd, shell=True)\n if (retcode != 0):\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf\n \n TR.info(methodName,\"Invoking: oc create -f efs-provisioner.yaml\")\n prov_cmd = \"oc create -f /ibm/templates/efs/efs-provisioner.yaml\"\n retcode = call(prov_cmd, shell=True)\n if (retcode != 0):\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf\n \n TR.info(methodName,\"Invoking: oc create -f efs-pvc.yaml\")\n pvc_cmd = \"oc create -f /ibm/templates/efs/efs-pvc.yaml\"\n retcode = call(pvc_cmd, shell=True)\n if (retcode != 0):\n raise Exception(\"Error calling oc. Return code: %s\" % retcode)\n #endIf \n \n TR.info(methodName,\"COMPLETED configuration of EFS.\")",
"def appsec_create(config, contract_id, group_id, by, activate, csv, email):\n logger.info('Start Akamai CLI onboard')\n _, wrap_api = init_config(config)\n util = utility.utility()\n util_waf = utility_waf.wafFunctions()\n\n appsec_main = Generic(contract_id, group_id, csv, by)\n # override default\n appsec_main.activate = activate\n if email:\n appsec_main.notification_emails = [email]\n\n # validate contract and group id first\n if not contract_id.startswith('ctr_'):\n sys.exit(logger.error('Contract ID must have prefix ctr_'))\n if not group_id.startswith('grp_'):\n sys.exit(logger.error('Group ID must have prefix grp_'))\n\n _, selectable_hostnames, selectable_df = wrap_api.get_selectable_hostnames(contract_id[4:], group_id[4:], appsec_main.network)\n show_df = util.validate_appsec_pre_create(appsec_main, wrap_api, util_waf, selectable_df)\n\n # start onboarding security config\n if util.valid:\n\n prev_waf_config = 0\n appsec_onboard = []\n for i in show_df.index:\n # populate property onboard data\n waf_config = show_df['waf_config_name'][i]\n policy = show_df['policy'][i]\n public_hostnames = show_df['hostname'][i]\n logger.debug(f'{waf_config} {policy} {public_hostnames}')\n onboard = Property(contract_id, group_id, waf_config, policy)\n if len(public_hostnames) > 0:\n onboard.public_hostnames = public_hostnames\n if by == 'propertyname':\n onboard.waf_target_hostnames = show_df['waf_target_hostname'][i]\n\n # validate hostnames and remove invalid hostnames\n invalid_hostnames = list({x for x in onboard.public_hostnames if x not in selectable_hostnames})\n if len(invalid_hostnames) > 1:\n onboard.public_hostnames = list(filter(lambda x: x not in invalid_hostnames, onboard.public_hostnames))\n if len(onboard.public_hostnames) == 0:\n logger.warning(f'Web security configuration {waf_config} - SKIPPING')\n logger.info(f'{invalid_hostnames} are not selectable hostnames')\n break\n if invalid_hostnames and len(onboard.public_hostnames) > 0:\n logger.warning(f'{invalid_hostnames} are not selectable hostnames for {waf_config}')\n\n # start onboarding security config\n if waf_config != prev_waf_config:\n if util_waf.create_waf_config(wrap_api, onboard):\n prev_waf_config_id = onboard.onboard_waf_config_id\n prev_waf_config_version = onboard.onboard_waf_config_version\n prev_waf_config = waf_config\n if activate:\n # popolate AppSec data\n appsec = AppSec(waf_config, onboard.onboard_waf_config_id, onboard.onboard_waf_config_version, [email])\n appsec_onboard.append(appsec)\n else:\n sys.exit(logger.error('Fail to create waf config'))\n else:\n # add hostnames to new policy\n onboard.onboard_waf_config_id = prev_waf_config_id\n onboard.onboard_waf_config_version = prev_waf_config_version\n output = []\n for hostname in onboard.public_hostnames:\n member = {}\n member['hostname'] = hostname\n output.append(member)\n payload = {}\n payload['hostnameList'] = output\n payload['mode'] = 'append'\n logger.debug(output)\n resp = wrap_api.modifyWafHosts(onboard.onboard_waf_config_id, onboard.onboard_waf_config_version, json.dumps(payload))\n if resp.status_code != 200:\n logger.error(resp.json())\n\n if util_waf.create_waf_policy(wrap_api, onboard):\n if by == 'propertyname':\n if util_waf.create_waf_match_target(wrap_api, onboard, onboard.waf_target_hostnames):\n pass\n else:\n if util_waf.create_waf_match_target(wrap_api, onboard):\n pass\n else:\n sys.exit(logger.error('Fail to create waf policy'))\n\n # activating\n if activate:\n time.sleep(5)\n util_waf.activate_and_poll(wrap_api, 
appsec_onboard, activate)\n util.log_cli_timing()",
"def main():\n # Creating resources/clients for all needed infrastructure: EC2, IAM, Redshift\n ec2 = create_client('ec2', boto3.resource)\n iam = create_client('iam', boto3.client)\n redshift = create_client('redshift', boto3.client)\n \n # Create needed IAM / ARN roles for Redshift\n create_iam_role(iam)\n arn_role = create_arn_role(iam)\n \n # Create cluster and await its completion\n create_redshift_cluster(redshift, arn_role)\n cluster_props = query_redshift_status(redshift)\n \n # Get endpoint into to allow querying\n info = get_redshift_endpoint_info(redshift, cluster_props)\n print(info)\n # TODO: Save info to aws.cfg\n \n # Update security groups to ACTUALLY allow querying\n update_cluster_security_group(ec2, cluster_props)\n \n # Test connection to see that everything (hopefully) went well\n test_connection()\n \n # End of main\n return",
"def main():\n progname = sys.argv[0]\n args = sys.argv[1:]\n argc = len(args)\n if argc < 3:\n print(\"Usage:\\n{} ROLE_ARN REGION CF_STACK_NAME\\n\".format(progname))\n sys.exit(1)\n else:\n role_arn = args[0]\n region = args[1]\n cf_stack_name = args[2]\n sess = get_session(role_arn, region)\n res = {}\n res[\"status\"] = delete_cf_stack(sess, cf_stack_name)\n dump_pretty(res)",
"def azs_lookup(session, lambda_compatible_only=False):\n if session is None:\n return []\n\n client = session.client('ec2')\n response = client.describe_availability_zones()\n # SH Removing Hack as subnet A is already in Production and causes issues trying to delete\n # We will strip out subnets A and C when creating the lambdas.\n #rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"] if z['ZoneName'] != 'us-east-1a']\n rtn = [(z[\"ZoneName\"], z[\"ZoneName\"][-1]) for z in response[\"AvailabilityZones\"]]\n\n if lambda_compatible_only:\n current_account = get_account_id_from_session(session)\n for az in rtn.copy():\n if az[1] == 'c' and current_account == hosts.PROD_ACCOUNT:\n rtn.remove(az)\n if az[1] == 'a' and current_account == hosts.DEV_ACCOUNT:\n rtn.remove(az)\n return rtn",
"def init():\n\n @click.command(name='configure-dns')\n @click.option('--cors-origin', help='CORS origin for API.')\n @click.option(\n '--krb-realm', help='Kerberos realm',\n envvar='TREADMILL_KRB_REALM',\n required=False\n )\n @click.option(\n '--ipa-certs', required=False, envvar='TREADMILL_IPA_CERTS',\n callback=aws_cli.handle_context_opt,\n is_eager=True,\n default='/etc/ipa/ca.crt',\n expose_value=False\n )\n @click.option(\n '--ipa-domain', required=False,\n envvar='IPA_DOMAIN',\n callback=treadmill_aws.cli.handle_context_opt,\n is_eager=True,\n expose_value=False\n )\n def configure_dns(cors_origin, krb_realm):\n \"\"\"Configure DNS cell records.\"\"\"\n\n ctx = CellCtx(cors=cors_origin, krb_realm=krb_realm)\n cellname = context.GLOBAL.cell\n\n ipa_client = awscontext.GLOBAL.ipaclient\n idnsname = 'zk.{}'.format(cellname)\n\n admin_cell = admin.Cell(context.GLOBAL.ldap.conn)\n cell = admin_cell.get(cellname)\n\n masters = ','.join(['{}:{}'.format(m['hostname'], m['zk-client-port'])\n for m in cell['masters']])\n scheme = cell.get('zk-auth-scheme')\n if not scheme:\n scheme = 'zookeeper'\n\n zkurl = '{scheme}://{username}@{hostports}/treadmill/{cell}'.format(\n scheme=scheme,\n username=ctx.proid,\n hostports=masters,\n cell=cellname\n )\n\n found = False\n try:\n current_rec = ipa_client.get_dns_record(idnsname)\n except ipaclient.NotFoundError:\n current_rec = None\n\n if current_rec:\n for record in current_rec['txtrecord']:\n if record != zkurl:\n _LOGGER.info(\n 'Deleting stale TXT record: %s %s', idnsname, record\n )\n ipa_client.delete_txt_record(idnsname, record)\n else:\n found = True\n\n if found:\n _LOGGER.info('Zookeeper TXT records up to date: %s : %s',\n idnsname, zkurl)\n return\n\n ipa_client.add_txt_record(idnsname, zkurl)\n\n return configure_dns",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def configureOCS(self,icpdInstallLogFile):\n methodName = \"configureOCS\"\n TR.info(methodName,\" Start configuration of OCS for CPD\")\n workerocs = \"/ibm/templates/ocs/workerocs.yaml\"\n workerocs_1az = \"/ibm/templates/ocs/workerocs1AZ.yaml\"\n if(len(self.zones)==1):\n shutil.copyfile(workerocs_1az,workerocs)\n self.updateTemplateFile(workerocs,'${az1}', self.zones[0])\n self.updateTemplateFile(workerocs,'${ami_id}', self.amiID)\n self.updateTemplateFile(workerocs,'${instance-type}', self.OCSInstanceType)\n self.updateTemplateFile(workerocs,'${instance-count}', self.NumberOfOCS)\n self.updateTemplateFile(workerocs,'${region}', self.region)\n self.updateTemplateFile(workerocs,'${cluster-name}', self.ClusterName)\n self.updateTemplateFile(workerocs, 'CLUSTERID', self.clusterID)\n self.updateTemplateFile(workerocs,'${subnet-1}',self.PrivateSubnet1ID)\n \n\n if(len(self.zones)>1):\n self.updateTemplateFile(workerocs,'${az2}', self.zones[1])\n self.updateTemplateFile(workerocs,'${az3}', self.zones[2])\n self.updateTemplateFile(workerocs,'${subnet-2}',self.PrivateSubnet2ID)\n self.updateTemplateFile(workerocs,'${subnet-3}',self.PrivateSubnet3ID)\n\n create_ocs_nodes_cmd = \"oc create -f \"+workerocs\n TR.info(methodName,\"Create OCS nodes\")\n try:\n retcode = check_output(['bash','-c', create_ocs_nodes_cmd])\n time.sleep(600)\n TR.info(methodName,\"Created OCS nodes %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n \n ocs_nodes = []\n get_ocs_nodes = \"oc get nodes --show-labels | grep storage-node |cut -d' ' -f1 \"\n try:\n ocs_nodes = check_output(['bash','-c',get_ocs_nodes])\n nodes = ocs_nodes.split(\"\\n\")\n TR.info(methodName,\"OCS_NODES %s\"%nodes)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n i =0\n while i < len(nodes)-1:\n TR.info(methodName,\"Labeling for OCS node %s \" %nodes[i])\n label_cmd = \"oc label nodes \"+nodes[i]+\" cluster.ocs.openshift.io/openshift-storage=''\"\n try: \n retcode = check_output(['bash','-c', label_cmd])\n TR.info(methodName,\"Label for OCS node %s returned %s\" %(nodes[i],retcode))\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n i += 1\n\n\n deploy_olm_cmd = \"oc create -f /ibm/templates/ocs/deploy-with-olm.yaml\"\n TR.info(methodName,\"Deploy OLM\")\n try:\n retcode = check_output(['bash','-c', deploy_olm_cmd]) \n time.sleep(300)\n TR.info(methodName,\"Deployed OLM %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n create_storage_cluster_cmd = \"oc create -f /ibm/templates/ocs/ocs-storagecluster.yaml\"\n TR.info(methodName,\"Create Storage Cluster\")\n try:\n retcode = check_output(['bash','-c', create_storage_cluster_cmd]) \n time.sleep(600)\n TR.info(methodName,\"Created Storage Cluster %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n install_ceph_tool_cmd = \"curl -s https://raw.githubusercontent.com/rook/rook/release-1.1/cluster/examples/kubernetes/ceph/toolbox.yaml|sed 's/namespace: rook-ceph/namespace: openshift-storage/g'| oc apply -f -\"\n TR.info(methodName,\"Install ceph toolkit\")\n try:\n retcode = 
check_output(['bash','-c', install_ceph_tool_cmd]) \n TR.info(methodName,\"Installed ceph toolkit %s\" %retcode)\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n TR.info(methodName,\"Configuration of OCS for CPD completed\")",
"def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)",
"def appprotect_waf_setup(request, kube_apis, test_namespace) -> None:\n uds_crd_resource = f\"{TEST_DATA}/ap-waf/ap-ic-uds.yaml\"\n ap_policy_uds = \"dataguard-alarm-uds\"\n print(\"------------------------- Deploy logconf -----------------------------\")\n src_log_yaml = f\"{TEST_DATA}/ap-waf/logconf.yaml\"\n global log_name\n log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)\n\n print(\"------------------------- Create UserSig CRD resource-----------------------------\")\n usersig_name = create_ap_usersig_from_yaml(kube_apis.custom_objects, uds_crd_resource, test_namespace)\n\n print(f\"------------------------- Deploy dataguard-alarm appolicy ---------------------------\")\n src_pol_yaml = f\"{TEST_DATA}/ap-waf/{ap_policy_uds}.yaml\"\n global ap_pol_name\n ap_pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)\n\n def fin():\n print(\"Clean up:\")\n delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace)\n delete_ap_usersig(kube_apis.custom_objects, usersig_name, test_namespace)\n delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)\n\n request.addfinalizer(fin)",
"def instantiate_ns(self, nsi_id, ns_descriptor, vnfds_descriptor, body, placement_info, resources, nestedInfo):\n # def instantiate_ns(self, nsi_id, ns_descriptor, body, placement_info):\n\n instantiationLevel = body.ns_instantiation_level_id\n # for composition/federation\n if nestedInfo:\n nested_descriptor = next(iter(nestedInfo))\n if len(nestedInfo[nested_descriptor]) > 1:\n # nested from a consumer domain\n nsId_tmp = nsi_id\n else:\n # nested local\n nsId_tmp = nsi_id + '_' + nested_descriptor\n else:\n nsId_tmp = nsi_id\n\n blueprint_name = nsId_tmp + \"_\" + ns_descriptor['nsd']['nsdIdentifier'] + \"_\" + instantiationLevel\n blueprints = self.__cloudify_client.blueprints.list(_include=['id'], id=[blueprint_name]).items\n\n agent_ids = {}\n if len(blueprints) == 0:\n #if True:\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s will be created\" % (blueprint_name)])\n # creates tmp folder for blueprint\n if not os.path.exists(self.__blueprints_path + \"/\" + nsId_tmp):\n os.makedirs(self.__blueprints_path + \"/\" + nsId_tmp)\n # os.makedirs(self.__blueprints_path + \"/\" + nsId_tmp)\n currentDT = datetime.datetime.now()\n string_date = currentDT.strftime(\"%Y_%m_%d_%H_%M_%S\")\n path_to_blueprint = self.__blueprints_path + \"/\" + nsId_tmp + \"/\" + string_date\n\n #full path and name for blueprint\n\n blueprint_yaml_name_with_path = path_to_blueprint + \"/\" + blueprint_name + \".yaml\"\n os.makedirs(path_to_blueprint)\n\n if self.__wrapper == \"openstack\":\n # set parameters for blueprint\n self.converter_to_yaml = ConverterNSDOpenstackYAML()\n self.converter_to_yaml.set_placement_info(placement_info)\n self.converter_to_yaml.set_nfvis_pop_info(self.get_nfvi_pop_info())\n self.converter_to_yaml.set_ns_instantiation_level_id(instantiationLevel)\n self.converter_to_yaml.set_ns_descriptor(ns_descriptor)\n self.converter_to_yaml.set_vnfds_descriptor(vnfds_descriptor)\n self.converter_to_yaml.set_ns_service_id(nsi_id)\n self.converter_to_yaml.parse()\n self.converter_to_yaml.sort_networks()\n self.converter_to_yaml.sort_servers()\n self.converter_to_yaml.generate_yaml(blueprint_yaml_name_with_path)\n\n if self.__wrapper == \"mtp\":\n\n self.converter_to_yaml = ConverterNSDMTPYAML()\n self.converter_to_yaml.set_placement_info(placement_info)\n self.converter_to_yaml.set_nested_info(nestedInfo)\n self.converter_to_yaml.set_nfvis_pop_info(self.get_nfvi_pop_info())\n self.converter_to_yaml.set_ns_instantiation_level_id(instantiationLevel)\n self.converter_to_yaml.set_ns_descriptor(ns_descriptor)\n db_vnf_deployed_info = nsir_db.get_vnf_deployed_info(nsId_tmp)\n self.converter_to_yaml.set_vnf_deployed_info(db_vnf_deployed_info)\n self.converter_to_yaml.set_vnfds_descriptor(vnfds_descriptor)\n self.converter_to_yaml.set_ns_service_id(nsId_tmp)\n self.converter_to_yaml.set_start_vlan(self.__start_vlan)\n self.converter_to_yaml.default_key_name(self.__default_key_name)\n self.converter_to_yaml.install_cloudify_agent(self.__install_cloudify_agent)\n self.converter_to_yaml.install_rvm_agent(self.__install_rvm_agent)\n self.converter_to_yaml.parse()\n self.converter_to_yaml.sort_networks()\n self.converter_to_yaml.sort_servers()\n self.converter_to_yaml.generate_yaml(blueprint_yaml_name_with_path)\n agent_ids = self.converter_to_yaml.get_agent_ids()\n\n # bluprint upload\n try:\n self.__cloudify_client.blueprints.upload(blueprint_yaml_name_with_path, blueprint_name)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Blueprint %s.yaml upload completed\" % (nsId_tmp)])\n #Check if exists 
blueprint in cloudify\n except CloudifyClientError as e:\n if e.error_code == 'conflict_error':\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s %s\" % (blueprint_name, e)])\n else:\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s %s\" % (blueprint_name, e)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Blueprint %s.yaml upload error %s \" % (blueprint_name, e)])\n return None\n\n # deployment creation\n\n try:\n self.__cloudify_client.deployments.create(blueprint_name, nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deployment %s creation started\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deployment creation error %s \" % (e)])\n return None\n\n try:\n self.wait_for_deployment_execution(nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deployment %s creation completed\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deployment creation error %s \" % (e)])\n return None\n\n # deploying\n try:\n self.__cloudify_client.executions.start(nsId_tmp, \"install\")\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deploying %s started\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deploying %s error %s \" % (nsId_tmp, e)])\n return None\n\n try:\n self.wait_for_deployment_execution(nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deploying %s completed\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deploying %s error %s \" % (nsId_tmp, e)])\n return None\n\n nsi_sap = self.__cloudify_client.deployments.outputs.get(deployment_id=nsId_tmp)\n\n instances = self.__cloudify_client.node_instances.list(deployment_id=nsId_tmp)\n\n nodes = self.__cloudify_client.nodes.list(deployment_id=nsId_tmp)\n\n vnf_deployed_info = self.get_information_of_vnf(instances, agent_ids)\n nsir_db.save_vnf_deployed_info(nsId_tmp, vnf_deployed_info)\n\n vim_net_info = self.get_information_of_networks(nsId_tmp, instances, nodes, nestedInfo)\n nsir_db.save_vim_networks_info(nsId_tmp, vim_net_info)\n\n instantiation_output = {}\n instantiation_output[\"sapInfo\"] = nsi_sap[\"outputs\"]\n converted_output = self.convert_output(instantiation_output)\n rvm_agents_execute_scripts = RvmAgentsExecuteScripts(ns_descriptor)\n rvm_agents_execute_scripts.set_vnfds_descriptor(vnfds_descriptor)\n rvm_agents_execute_scripts.set_placement_info(placement_info)\n rvm_agents_execute_scripts.set_sap_info(converted_output)\n rvm_agents_execute_scripts.set_vim_net_info(vim_net_info)\n rvm_agents_execute_scripts.set_vnf_deployed_info(vnf_deployed_info)\n rvm_agents_execute_scripts.excute_script(\"instantiate\", instantiationLevel, None)\n return converted_output",
"def configure_roles(self, options):\n roles = [\n {\n 'name': 'orchestrate.devOps',\n 'title': 'Orchestrate DevOps',\n 'description': (\n 'Orchestrate the creation and lifecycle of all resources'\n ' available to and created by users.'\n ),\n 'includedPermissions': [\n 'compute.acceleratorTypes.list',\n 'compute.images.list',\n 'compute.images.get',\n 'compute.images.create',\n 'compute.images.delete',\n 'compute.images.getFromFamily',\n 'compute.images.useReadOnly',\n 'compute.instanceTemplates.list',\n 'compute.instanceTemplates.get',\n 'compute.instanceTemplates.create',\n 'compute.instanceTemplates.delete',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setDeletionProtection',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.disks.useReadOnly',\n 'compute.networks.get',\n 'compute.networks.addPeering',\n 'compute.networks.updatePolicy',\n 'compute.subnetworks.get',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp',\n 'compute.globalOperations.get',\n 'compute.regionOperations.get',\n 'compute.zoneOperations.get'\n ],\n 'stage': 'ALPHA'\n },\n {\n 'name': 'orchestrate.resourceManager',\n 'title': 'Orchestrate Resource Manager',\n 'description': (\n 'Create instance templates, instances, and manage the lifecycle'\n ' of resources created by users.'\n ),\n 'includedPermissions': [\n 'compute.acceleratorTypes.list',\n 'compute.images.list',\n 'compute.images.get',\n 'compute.images.create',\n 'compute.images.delete',\n 'compute.images.getFromFamily',\n 'compute.images.useReadOnly',\n 'compute.instanceTemplates.list',\n 'compute.instanceTemplates.get',\n 'compute.instanceTemplates.create',\n 'compute.instanceTemplates.delete',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp'\n ],\n 'stage': 'ALPHA'\n },\n {\n 'name': 'orchestrate.user',\n 'title': 'Orchestrate User',\n 'description': (\n 'Create instances and do basic lifecycle management of'\n ' resources they own.'\n ),\n 'includedPermissions': [\n 'compute.instanceTemplates.list',\n 'compute.instanceTemplates.get',\n 'compute.instances.list',\n 'compute.instances.get',\n 'compute.instances.create',\n 'compute.instances.delete',\n 'compute.instances.setDeletionProtection',\n 'compute.instances.setLabels',\n 'compute.instances.setMetadata',\n 'compute.instances.setServiceAccount',\n 'compute.instances.setTags',\n 'compute.instances.stop',\n 'compute.disks.create',\n 'compute.subnetworks.use',\n 'compute.subnetworks.useExternalIp'\n ],\n 'stage': 'ALPHA'\n }\n ]\n for role in roles:\n permissions = ','.join(role['includedPermissions'])\n try:\n # Try to create first\n command = (\n 'gcloud iam roles create {name} --project={project}'\n ' --title=\"{title}\" --description=\"{description}\"'\n ' --permissions={permissions} --stage={stage} --quiet').format(\n project=options.project,\n permissions=permissions,\n **role,\n )\n log.debug(command)\n subprocess.check_call(command, shell=True)\n except subprocess.CalledProcessError as exception:\n # if it fails, then 
try to update\n command = (\n 'gcloud iam roles update {name} --project={project}'\n ' --title=\"{title}\" --description=\"{description}\"'\n ' --permissions={permissions} --stage={stage} --quiet').format(\n project=options.project,\n permissions=permissions,\n **role,\n )\n run(command)",
"def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)",
"def main():\n snap_name = 'REST_Snap_' + strftime('%d%m%Y%H%M%S')\n ru.replication.create_storagegroup_snap(sg_id, snap_name)\n print('Check the Gui now or REST Client to see if snapshot '\n '{snap_name} was created for Storge Group {sg_id}'\n .format(snap_name=snap_name, sg_id=sg_id))",
"def instantiate_ns(self, nsi_id, ns_descriptor, vnfds_descriptor, body, placement_info, resources, nestedInfo):\n # def instantiate_ns(self, nsi_id, ns_descriptor, body, placement_info):\n\n instantiationLevel = body.ns_instantiation_level_id\n # for composition/federation\n if nestedInfo:\n nested_descriptor = next(iter(nestedInfo))\n if len(nestedInfo[nested_descriptor]) > 1:\n # nested from a consumer domain\n nsId_tmp = nsi_id\n else:\n # nested local\n nsId_tmp = nsi_id + '_' + nested_descriptor\n else:\n nsId_tmp = nsi_id\n\n blueprint_name = nsId_tmp + \"_\" + ns_descriptor['nsd']['nsdIdentifier'] + \"_\" + instantiationLevel\n blueprints = self.__cloudify_client.blueprints.list(_include=['id'], id=[blueprint_name]).items\n if len(blueprints) == 0:\n # if True:\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s will be created\" % (blueprint_name)])\n # creates tmp folder for blueprint\n if not os.path.exists(self.__blueprints_path + \"/\" + nsId_tmp):\n os.makedirs(self.__blueprints_path + \"/\" + nsId_tmp)\n # os.makedirs(self.__blueprints_path + \"/\" + nsId_tmp)\n currentDT = datetime.datetime.now()\n string_date = currentDT.strftime(\"%Y_%m_%d_%H_%M_%S\")\n path_to_blueprint = self.__blueprints_path + \"/\" + nsId_tmp + \"/\" + string_date\n\n #full path and name for blueprint\n\n blueprint_yaml_name_with_path = path_to_blueprint + \"/\" + blueprint_name + \".yaml\"\n os.makedirs(path_to_blueprint)\n\n if self.__wrapper == \"openstack\":\n # set parameters for blueprint\n converter_to_yaml = ConverterNSDOpenstackYAML()\n converter_to_yaml.set_placement_info(placement_info)\n converter_to_yaml.set_nfvis_pop_info(self.get_nfvi_pop_info())\n converter_to_yaml.set_ns_instantiation_level_id(instantiationLevel)\n converter_to_yaml.set_ns_descriptor(ns_descriptor)\n converter_to_yaml.set_vnfds_descriptor(vnfds_descriptor)\n converter_to_yaml.set_ns_service_id(nsi_id)\n converter_to_yaml.parse()\n converter_to_yaml.sort_networks()\n converter_to_yaml.sort_servers()\n converter_to_yaml.generate_yaml(blueprint_yaml_name_with_path)\n\n if self.__wrapper == \"mtp\":\n converter_to_yaml = ConverterNSDMTPYAML()\n converter_to_yaml.set_placement_info(placement_info)\n converter_to_yaml.set_nested_info(nestedInfo)\n converter_to_yaml.set_nfvis_pop_info(self.get_nfvi_pop_info())\n converter_to_yaml.set_ns_instantiation_level_id(instantiationLevel)\n converter_to_yaml.set_ns_descriptor(ns_descriptor)\n converter_to_yaml.set_vnfds_descriptor(vnfds_descriptor)\n converter_to_yaml.set_ns_service_id(nsId_tmp)\n converter_to_yaml.set_start_vlan(self.__start_vlan)\n converter_to_yaml.default_key_name(self.__default_key_name)\n converter_to_yaml.install_cloudify_agent(self.__install_cloudify_agent)\n converter_to_yaml.parse()\n converter_to_yaml.sort_networks()\n converter_to_yaml.sort_servers()\n converter_to_yaml.generate_yaml(blueprint_yaml_name_with_path)\n\n # bluprint upload\n try:\n self.__cloudify_client.blueprints.upload(blueprint_yaml_name_with_path, blueprint_name)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Blueprint %s.yaml upload completed\" % (nsId_tmp)])\n #Check if exists blueprint in cloudify\n except CloudifyClientError as e:\n if e.error_code == 'conflict_error':\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s %s\" % (blueprint_name, e)])\n else:\n log_queue.put([\"INFO\", \"CLOUDIFY_WRAPPER: Blueprint %s %s\" % (blueprint_name, e)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Blueprint %s.yaml upload error %s \" % 
(blueprint_name, e)])\n return None\n\n # deployment creation\n\n try:\n self.__cloudify_client.deployments.create(blueprint_name, nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deployment %s creation started\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deployment creation error %s \" % (e)])\n return None\n\n try:\n self.wait_for_deployment_execution(nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deployment %s creation completed\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deployment creation error %s \" % (e)])\n return None\n\n # deploying\n try:\n self.__cloudify_client.executions.start(nsId_tmp, \"install\")\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deploying %s started\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deploying %s error %s \" % (nsId_tmp, e)])\n return None\n\n try:\n self.wait_for_deployment_execution(nsId_tmp)\n log_queue.put([\"DEBUG\", \"CLOUDIFY_WRAPPER: Deploying %s completed\" % (nsId_tmp)])\n except Exception as e:\n log_queue.put([\"ERROR\", \"CLOUDIFY_WRAPPER: Deploying %s error %s \" % (nsId_tmp, e)])\n return None\n\n nsi_sap = self.__cloudify_client.deployments.outputs.get(deployment_id=nsId_tmp)\n\n instances = self.__cloudify_client.node_instances.list(deployment_id=nsId_tmp)\n\n nodes = self.__cloudify_client.nodes.list(deployment_id=nsId_tmp)\n\n vnf_deployed_info = self.get_information_of_vnf(instances)\n nsir_db.save_vnf_deployed_info(nsId_tmp, vnf_deployed_info)\n\n vim_net_info = self.get_information_of_networks(nsId_tmp, instances, nodes, nestedInfo)\n nsir_db.save_vim_networks_info(nsId_tmp, vim_net_info)\n\n instantiation_output = {}\n instantiation_output[\"sapInfo\"] = nsi_sap[\"outputs\"]\n converted_output = self.convert_output(instantiation_output)\n return converted_output"
]
| [
"0.5668733",
"0.56471294",
"0.56471294",
"0.5595357",
"0.5510343",
"0.54987",
"0.5382354",
"0.52583814",
"0.5234275",
"0.52052534",
"0.51916015",
"0.51848376",
"0.51822424",
"0.5158139",
"0.51509154",
"0.5115389",
"0.51124156",
"0.5095959",
"0.50951517",
"0.5077985",
"0.507127",
"0.50441366",
"0.5039773",
"0.5038571",
"0.5036352",
"0.50259167",
"0.50066936",
"0.50047266",
"0.49985155",
"0.49966294"
]
| 0.68908155 | 0 |
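The configure_roles example among the negatives above uses a create-then-update fallback so that re-running the setup is idempotent: `gcloud iam roles create` is attempted first, and a CalledProcessError triggers `gcloud iam roles update` with the same flags. A minimal sketch of that pattern follows; the function name and argument list are illustrative, not taken from the original.

import subprocess

def create_or_update_role(name, project, title, description, permissions, stage="ALPHA"):
    # Shared flag string for both the create and the update call.
    args = (
        f' --project={project} --title="{title}" --description="{description}"'
        f' --permissions={",".join(permissions)} --stage={stage} --quiet'
    )
    try:
        # First attempt: create the custom role.
        subprocess.check_call(f"gcloud iam roles create {name}{args}", shell=True)
    except subprocess.CalledProcessError:
        # Creation fails when the role already exists; fall back to updating it.
        subprocess.check_call(f"gcloud iam roles update {name}{args}", shell=True)
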
calculate the loop number on each core | def _cal_core(tik_instance, total_core_loop_num, num_core, core_number):
core_loop = tik_instance.Scalar("uint64")
sum_core = tik_instance.Scalar("uint64")
with tik_instance.if_scope(num_core < total_core_loop_num % MAX_CORE_NUM):
core_loop.set_as((total_core_loop_num + core_number - 1) //
core_number)
sum_core.set_as(core_loop * num_core)
with tik_instance.else_scope():
core_loop.set_as(total_core_loop_num // core_number)
sum_core.set_as((core_loop + 1) *
(total_core_loop_num % MAX_CORE_NUM) +
core_loop *
(num_core - total_core_loop_num % MAX_CORE_NUM))
return core_loop, sum_core | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_total_n_cpu(self) -> int:",
"def _cal_core_loop_python(num_data_one_loop, core_loop, ub_ori):\n align_loop = ub_ori // num_data_one_loop\n remainder = core_loop % align_loop\n\n if align_loop > core_loop:\n align_loop = core_loop\n remainder = 0\n\n return align_loop, remainder",
"def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()",
"def number_of_workers():\n return (cpu_count() * 2) + 1",
"def numcpu () :\n import multiprocessing\n return multiprocessing.cpu_count()",
"def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1",
"def _cal_core_loop(tik_instance, num_data_one_loop, core_loop, ub_ori):\n align_loop = tik_instance.Scalar(\"uint64\")\n align_loop.set_as((ub_ori + num_data_one_loop - 1) // num_data_one_loop)\n with tik_instance.if_scope((align_loop - 1) * core_loop *\n num_data_one_loop >= ub_ori):\n align_loop.set_as(align_loop - 1)\n\n remainder = tik_instance.Scalar(\"uint64\")\n remainder.set_as(core_loop % align_loop)\n with tik_instance.if_scope(remainder == 0):\n remainder.set_as(align_loop)\n\n return align_loop, remainder",
"def ncore(self):",
"def cpu_count_cores():\n return cext.cpu_count_cores()",
"def threads_per_core(self) -> int:\n return pulumi.get(self, \"threads_per_core\")",
"def _set_loop(tik_instance, num_core, max_core, total_dim):\n core_loop = tik_instance.Scalar(\"uint64\")\n\n with tik_instance.if_scope(num_core < total_dim % AICORE_NUM):\n core_loop.set_as(_ceil_div(total_dim, max_core))\n with tik_instance.else_scope():\n core_loop.set_as(total_dim // max_core)\n\n return core_loop",
"def core_num(self):\n try:\n self.logger.info('正在计算简单图的核数值 ...')\n return self.order_dict(nx.core_number(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))\n return {}",
"def threads_per_core(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"threads_per_core\")",
"def cpu_count():\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores",
"def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores",
"def cores_per_node(self):\n return self.cores_per_socket * self.sockets_per_node",
"def _cal_core_loop_python_one(num_data_one_loop, core_loop, ub_ori):\n align_loop = ub_ori // num_data_one_loop\n\n if align_loop * num_data_one_loop > ub_ori:\n align_loop = align_loop - 1\n\n remainder = core_loop % align_loop\n\n return align_loop, remainder",
"def get_coreids(self):\n return range(0, self.get_ncores()) # default behaviour for x86",
"def num_processes(self, new_value):",
"def cpu_count():\r\n if mp is None:\r\n return 1\r\n return mp.cpu_count()",
"def cores_per_socket(self):\n return int(self.num_cores_per_socket) # type: ignore",
"def cores(self):\n return int(self.get('cores'))",
"def concurrency(self):\n return multiprocessing.cpu_count()",
"def getThreads():\r\n return multiprocessing.cpu_count()",
"def num_processes():\n return 1",
"def _get_threads():\n if sys.platform == 'win32':\n # return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n return 0 # save trouble, do not use multiprocessing on windows\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())",
"def ncpu ( events ) :\n #\n n_cores = numcpu() \n if n_cores <= 1 : return ROOT.RooFit.NumCPU ( 1 ) ## fake!!! \n #\n n = events // _nemax\n if n <= 1 : return ROOT.RooFit.NumCPU ( 1 ) ## fake!!! \n #\n num = min ( n , n_cores , _ncmax )\n if not _ncpus : _ncpus.append ( num ) \n #\n return ROOT.RooFit.NumCPU ( num )",
"def core_number(G):\n if nx.number_of_selfloops(G) > 0:\n msg = (\n \"Input graph has self loops which is not permitted; \"\n \"Consider using G.remove_edges_from(nx.selfloop_edges(G)).\"\n )\n raise NetworkXError(msg)\n degrees = dict(G.degree())\n # Sort nodes by degree.\n nodes = sorted(degrees, key=degrees.get)\n bin_boundaries = [0]\n curr_degree = 0\n for i, v in enumerate(nodes):\n if degrees[v] > curr_degree:\n bin_boundaries.extend([i] * (degrees[v] - curr_degree))\n curr_degree = degrees[v]\n node_pos = {v: pos for pos, v in enumerate(nodes)}\n # The initial guess for the core number of a node is its degree.\n core = degrees\n nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}\n for v in nodes:\n for u in nbrs[v]:\n if core[u] > core[v]:\n nbrs[u].remove(v)\n pos = node_pos[u]\n bin_start = bin_boundaries[core[u]]\n node_pos[u] = bin_start\n node_pos[nodes[bin_start]] = pos\n nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]\n bin_boundaries[core[u]] += 1\n core[u] -= 1\n return core",
"def update_cores(self):\n num_cores = 0\n for job in self.job_list:\n num_cores += int(job.get_core_info())\n self.cores_used = num_cores\n return",
"def cpu_count(only_physical_cores=False):\n if mp is None:\n return 1\n\n return loky.cpu_count(only_physical_cores=only_physical_cores)"
]
| [
"0.68350554",
"0.6819639",
"0.6797855",
"0.67562383",
"0.67468655",
"0.6727995",
"0.6689224",
"0.65811265",
"0.6566272",
"0.65277976",
"0.6521367",
"0.6520759",
"0.6491461",
"0.6433719",
"0.6419304",
"0.64132607",
"0.6391651",
"0.63396746",
"0.6273546",
"0.6247827",
"0.61350226",
"0.6134889",
"0.6113574",
"0.6100786",
"0.6088433",
"0.60840696",
"0.6076384",
"0.60623854",
"0.60462284",
"0.6042713"
]
| 0.7611465 | 0 |
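The _cal_core kernel above distributes total_core_loop_num iterations over the cores: the first total % MAX_CORE_NUM cores each take one extra iteration, and sum_core is the core's starting offset. A plain-Python sketch of the same arithmetic, assuming core_number equals MAX_CORE_NUM, with a check that the per-core ranges tile the full range exactly once:

def split_loops(total, cores, core_idx):
    # The first (total % cores) cores run ceil(total / cores) iterations,
    # the rest run floor(total / cores); the second return value is the
    # core's starting iteration index.
    extra = total % cores
    if core_idx < extra:
        core_loop = (total + cores - 1) // cores
        start = core_loop * core_idx
    else:
        core_loop = total // cores
        start = (core_loop + 1) * extra + core_loop * (core_idx - extra)
    return core_loop, start

# Sanity check: the per-core ranges cover [0, total) exactly once.
total, cores = 1000, 32
covered = sorted(i for c in range(cores)
                 for n, s in [split_loops(total, cores, c)]
                 for i in range(s, s + n))
assert covered == list(range(total))
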
calculate the number of loops and the remainder on each core and return them as Python variables | def _cal_core_loop_python(num_data_one_loop, core_loop, ub_ori):
align_loop = ub_ori // num_data_one_loop
remainder = core_loop % align_loop
if align_loop > core_loop:
align_loop = core_loop
remainder = 0
return align_loop, remainder | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cal_core(tik_instance, total_core_loop_num, num_core, core_number):\n core_loop = tik_instance.Scalar(\"uint64\")\n sum_core = tik_instance.Scalar(\"uint64\")\n\n with tik_instance.if_scope(num_core < total_core_loop_num % MAX_CORE_NUM):\n core_loop.set_as((total_core_loop_num + core_number - 1) //\n core_number)\n sum_core.set_as(core_loop * num_core)\n\n with tik_instance.else_scope():\n core_loop.set_as(total_core_loop_num // core_number)\n sum_core.set_as((core_loop + 1) *\n (total_core_loop_num % MAX_CORE_NUM) +\n core_loop *\n (num_core - total_core_loop_num % MAX_CORE_NUM))\n\n return core_loop, sum_core",
"def _cal_core_loop(tik_instance, num_data_one_loop, core_loop, ub_ori):\n align_loop = tik_instance.Scalar(\"uint64\")\n align_loop.set_as((ub_ori + num_data_one_loop - 1) // num_data_one_loop)\n with tik_instance.if_scope((align_loop - 1) * core_loop *\n num_data_one_loop >= ub_ori):\n align_loop.set_as(align_loop - 1)\n\n remainder = tik_instance.Scalar(\"uint64\")\n remainder.set_as(core_loop % align_loop)\n with tik_instance.if_scope(remainder == 0):\n remainder.set_as(align_loop)\n\n return align_loop, remainder",
"def _cal_core_loop_python_one(num_data_one_loop, core_loop, ub_ori):\n align_loop = ub_ori // num_data_one_loop\n\n if align_loop * num_data_one_loop > ub_ori:\n align_loop = align_loop - 1\n\n remainder = core_loop % align_loop\n\n return align_loop, remainder",
"def get_total_n_cpu(self) -> int:",
"def _set_loop(tik_instance, num_core, max_core, total_dim):\n core_loop = tik_instance.Scalar(\"uint64\")\n\n with tik_instance.if_scope(num_core < total_dim % AICORE_NUM):\n core_loop.set_as(_ceil_div(total_dim, max_core))\n with tik_instance.else_scope():\n core_loop.set_as(total_dim // max_core)\n\n return core_loop",
"def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()",
"def numcpu () :\n import multiprocessing\n return multiprocessing.cpu_count()",
"def duet(lines):\n regs = defaultdict(int)\n idx = 0\n mul_count = 0\n while idx < len(lines):\n regs, idx, mul_flag = run_cmd(lines[idx], regs, idx)\n mul_count += int(mul_flag)\n idx += 1\n\n return mul_count",
"def number_of_iterations(self) -> int:\n pass",
"def number_of_workers():\n return (cpu_count() * 2) + 1",
"def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1",
"def cpu_count_cores():\n return cext.cpu_count_cores()",
"def num_processes(self, new_value):",
"def ncore(self):",
"def parallel_count_calculate_func(cls, parallel_count):\n if parallel_count == 0:\n result = 1\n\n else:\n count = float(0.0)\n\n for number in range(int(parallel_count)):\n count += float(1 / (float(number) + 1))\n\n result = pow(count, (-1))\n\n return result",
"def COUNTER_TOTAL():\n return 3",
"def cpu_count():\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores",
"def cpu_count_logical():\n return cext.cpu_count_logical()",
"def calc(self):\n num = 22\n while not self.divisible(num):\n # we know that only even numbers are divisible by 2, so\n # we only inspect even numbers.\n num = num + 2\n if num % 10000:\n print(str(num), end='\\r')\n\n return num",
"def n_photon_counting_div(self):\n inti = ct.c_ulong()\n self.lib.GetNumberPhotonCountingDivisions(ct.pointer(inti))\n return inti.value",
"def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count",
"def test_eval_8(self):\n maxcycles = collatz_eval(5000, 15000)\n self.assertEqual(maxcycles, 276)",
"def overall_reduction(self):\n return 84",
"def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores",
"def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations",
"def divmod_node(self, mpi_procs, omp_threads):\n return divmod(mpi_procs * omp_threads, self.cores_per_node)",
"def concurrency(self):\n return multiprocessing.cpu_count()",
"def cpu_count():\r\n if mp is None:\r\n return 1\r\n return mp.cpu_count()",
"def get_cpu_count():\n\n # #Check nproc. I have found it respecting the visible CPUs in SLURM:\n # try:\n # m = subprocess.run(['nproc'], stdout=subprocess.PIPE)\n # if m:\n # res = int(m.stdout.decode('ascii').replace(\"\\n\", \"\"))\n # if res > 0:\n # return res\n # except:\n # pass\n \n\n # cpuset\n # cpuset may restrict the number of *available* processors\n try:\n m = re.search(r'(?m)^Cpus_allowed:\\s*(.*)$',\n open('/proc/self/status').read())\n if m:\n res = bin(int(m.group(1).replace(',', ''), 16)).count('1')\n if res > 0:\n return res\n except IOError:\n pass\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # https://github.com/giampaolo/psutil\n try:\n import psutil\n return psutil.cpu_count() # psutil.NUM_CPUS on old versions\n except (ImportError, AttributeError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n res = 0\n for pd in pseudoDevices:\n if re.match(r'^cpuid@[0-9]+$', pd):\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')",
"def detect_loop(memory: Memory) -> int:\n arrangements_seen = set()\n balancer_rounds = 0\n\n while memory not in arrangements_seen:\n arrangements_seen.add(memory)\n memory = memory.balance()\n balancer_rounds += 1\n\n return balancer_rounds, memory"
]
| [
"0.72697914",
"0.6879744",
"0.6682081",
"0.65112865",
"0.64037377",
"0.63325685",
"0.6216641",
"0.61193895",
"0.61",
"0.60939896",
"0.60520416",
"0.60310453",
"0.60280025",
"0.59839344",
"0.59617555",
"0.58904606",
"0.58621055",
"0.58573747",
"0.5834444",
"0.5831784",
"0.58117616",
"0.5809556",
"0.5803029",
"0.57945246",
"0.57845587",
"0.5780646",
"0.57546836",
"0.5749724",
"0.57420945",
"0.5740882"
]
| 0.71695155 | 1 |
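The _cal_core_loop_python helper above sizes the copy bursts for one core: align_loop iterations fit in the unified buffer (ub_ori elements) per burst, and remainder is the tail flushed after the last full burst. A small restatement with a worked example; the sample sizes are illustrative only.

def plan_batches(num_data_one_loop, core_loop, ub_capacity):
    # How many loop iterations can be buffered per burst, and the tail count.
    align_loop = ub_capacity // num_data_one_loop
    remainder = core_loop % align_loop
    if align_loop > core_loop:   # everything fits in a single burst
        align_loop = core_loop
        remainder = 0
    return align_loop, remainder

# Example: 1000 iterations of 256 elements each, with a 4096-element buffer.
align_loop, remainder = plan_batches(256, 1000, 4096)
full_bursts = (1000 - remainder) // align_loop
assert full_bursts * align_loop + remainder == 1000
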
vector_dup zeros when dup_number is a Python variable | def vector_dup_zero(self, tik_instance, ub_trans, dup_number, offset):
scalar_zero = tik_instance.Scalar(dtype="float16", init_value=0.0)
repeat_number = dup_number // MAX_MASK
tail = dup_number % MAX_MASK
with tik_instance.for_range(0, repeat_number // MAX_REPEATS) as \
num_repeat_loop:
tik_instance.vector_dup(MAX_MASK,
ub_trans[MAX_MASK * MAX_REPEATS *
num_repeat_loop + offset],
scalar_zero,
MAX_REPEATS,
self.cast_num_byte // 2,
MAX_MASK // self.cast_num_data)
if repeat_number % MAX_REPEATS != 0:
tik_instance.vector_dup(MAX_MASK,
ub_trans[repeat_number // MAX_REPEATS *
MAX_MASK * MAX_REPEATS + offset],
scalar_zero,
repeat_number % MAX_REPEATS,
self.cast_num_byte // 2,
MAX_MASK // self.cast_num_data)
if tail != 0:
tik_instance.vector_dup(tail,
ub_trans[MAX_MASK * repeat_number +
offset],
scalar_zero,
1,
self.cast_num_byte // 2,
MAX_MASK // self.cast_num_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vector_dump_set(ibuilder, scalar, block_num, buf):\n vec_dtype = buf.dtype\n if vec_dtype in [\"float32\", \"int32\"]:\n vec_compute_nums = VEC_NUMS_HALF\n else:\n vec_compute_nums = VEC_NUMS\n\n repeat_times = int(block_num // VEC_BLOCK_NUMS)\n remain_len = int(block_num % VEC_BLOCK_NUMS)\n init_times = int(repeat_times // UINT8_MAX)\n init_remain = int(repeat_times % UINT8_MAX)\n\n with ibuilder.if_scope(repeat_times != 0):\n if init_times != 0:\n with ibuilder.for_range(0, init_times) as rch:\n with ibuilder.new_scope():\n reset_mask_insn(ibuilder, buf.dtype, bits=vec_compute_nums)\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 2)\n ibuilder.emit(tvm.call_extern(buf.dtype, \"vector_dup\", \\\n buf.access_ptr('w', offset=rch * UINT8_MAX\n * vec_compute_nums), \\\n tvm.const(scalar, dtype=vec_dtype), \\\n 255, 1, 1, 8, 8))\n if init_remain != 0:\n with ibuilder.new_scope():\n reset_mask_insn(ibuilder, buf.dtype, bits=vec_compute_nums)\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 2)\n ibuilder.emit(tvm.call_extern(buf.dtype, \"vector_dup\", \\\n buf.access_ptr('w', offset=init_times * UINT8_MAX\n * vec_compute_nums), \\\n tvm.const(scalar, dtype=vec_dtype), \\\n init_remain, 1, 1, 8, 8))\n\n with ibuilder.if_scope(remain_len != 0):\n with ibuilder.new_scope():\n mask_len = remain_len * (vec_compute_nums // VEC_BLOCK_NUMS)\n reset_mask_insn(ibuilder, buf.dtype, bits=mask_len)\n ibuilder.scope_attr(cce.CCE_AXIS, \"coproc_scope\", 2)\n ibuilder.emit(tvm.call_extern(buf.dtype, \"vector_dup\", \\\n buf.access_ptr('w', offset=repeat_times\n * vec_compute_nums), \\\n tvm.const(scalar, dtype=vec_dtype), 1, 1, 1, 8, 8))",
"def clone_zero(self):\n return VectorHeat1D(self.size)",
"def clone_zero(self):",
"def duplicateZeros(self, arr) -> None:\n tmp = list()\n for i,v in enumerate(arr):\n if v==0:\n tmp.append(0)\n tmp.append(0)\n else:\n tmp.append(v)\n\n arr[i] = tmp[i]",
"def nonzero(self):\n\t\t_x = self.__seqvector.vec.nonzero()[1]\n\t\t_x = list(set(_x)) # uniquify them\n\t\t_x.sort() # sort positions\n\t\treturn _x",
"def add_unique(self, *args):\n return _ida_hexrays.qvector_lvar_t_add_unique(self, *args)",
"def _clean_ubuf(tik_inst, src, src_offset, dup_len):\n\n if src.dtype.lower() == \"float16\":\n dtype_factor = 2\n elif src.dtype.lower() == \"float32\":\n dtype_factor = 1\n batch_size = 64\n\n if dup_len > 0:\n repeat = dup_len // (batch_size * dtype_factor)\n left_elem = dup_len % (batch_size * dtype_factor)\n repeat_loop = repeat // REPEAT_LIMIT\n repeat_left = repeat % REPEAT_LIMIT\n dup_value = float(0)\n\n if repeat_loop > 0:\n with tik_inst.for_range(0, repeat_loop) as rpt_idx:\n tik_inst.vector_dup(MASK_128,\n src[src_offset + rpt_idx *\n REPEAT_LIMIT *\n batch_size * dtype_factor],\n dup_value, REPEAT_LIMIT, 1, 8)\n\n if repeat_left > 0:\n tik_inst.vector_dup(MASK_128,\n src[src_offset + repeat_loop *\n REPEAT_LIMIT *\n batch_size * dtype_factor],\n dup_value, repeat_left, 1, 8)\n\n if left_elem > 0:\n tik_inst.vector_dup(left_elem,\n src[src_offset + repeat *\n batch_size * dtype_factor],\n dup_value, 1, 1, 8)",
"def clone(self):\n v = self.mV[:]\n return Vector.fromSequence(v)",
"def cdup (self):\r\n pass",
"def duplicateZeros(self, arr: List[int]) -> None:\n zeros = arr.count(0)\n for i in reversed(range(len(arr))): \n if i + zeros < len(arr): \n arr[i+zeros] = arr[i]\n if arr[i] == 0: \n zeros -= 1\n if i + zeros < len(arr): \n arr[i+zeros] = arr[i]",
"def unique_id(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_unique_id(self)",
"def add_unique(self, *args):\n return _ida_hexrays.qvector_ccase_t_add_unique(self, *args)",
"def hash_vector(self,v):\r\n # you will need to use self.functions for this method\r\n x = np.array([f(v[0]) for f in self.functions])\r\n #print (x)\r\n return x\r\n raise NotImplementedError",
"def test_handle_duplicates(self):\n vector_updates = np.array([[0.5, 0.5], [0.1, 0.2], [0.3, -0.2]])\n node_indices = [0, 1, 0]\n PoincareModel._handle_duplicates(vector_updates, node_indices)\n vector_updates_expected = np.array([[0.0, 0.0], [0.1, 0.2], [0.8, 0.3]])\n self.assertTrue((vector_updates == vector_updates_expected).all())",
"def duplicateZeros(self, arr: List[int]) -> None:\n last_index = -1\n for i, num in enumerate(arr):\n if i > last_index and num == 0:\n last_index = i + 1\n arr.insert(i, 0)\n arr.pop()",
"def Copy(self) -> BaseVector:",
"def CreateVector(self) -> BaseVector:",
"def duplicateZeros(self, arr: List[int]) -> None:\n i = 0\n j = 0\n n = len(arr)\n while i < n:\n if arr[i] == 0:\n j += 1\n i += 1\n j += 1\n i -= 1\n j -= 1\n while i >= 0:\n if j < n:\n arr[j] = arr[i]\n if arr[i] == 0:\n j -= 1\n if j < n:\n arr[j] = 0\n i -= 1\n j -= 1",
"def set_zero_vector(self):\n self.vector = np.zeros(self.dimension, dtype = float)",
"def counts_to_vector(counts):\n\n return np.hstack([np.repeat(idx, count) for idx, count in enumerate(counts)])",
"def duplicateZeros(self, arr: List[int]) -> None:\n i = 0\n for num in list(arr):\n if i >= len(arr): break\n arr[i] = num\n if not num:\n i += 1\n if i < len(arr):\n arr[i] = num\n i += 1",
"def duplicateZeros(self, arr: List[int]) -> None:\n i = 0\n j = len(arr)\n while i < j:\n if arr[i] == 0:\n arr.insert(i,0)\n arr.pop()\n i += 2\n else:\n i += 1",
"def prepare_solution_vector(self, number_density):\n atomic_numbers = number_density.index\n solution_array = []\n for atomic_number in atomic_numbers:\n solution_array.append(\n self.solution_vector_block(\n atomic_number, number_density.loc[atomic_number]\n )\n )\n solution_vector = np.hstack(solution_array + [0])\n return solution_vector",
"def test_vector_projection_on_zero_vector():\n random_state = np.random.RandomState(23)\n for _ in range(5):\n a = pr.random_vector(random_state, 3)\n a_on_b = pr.vector_projection(a, np.zeros(3))\n assert_array_almost_equal(a_on_b, np.zeros(3))",
"def duplicateZeros(self, arr: List[int]) -> None:\n x = 0\n while x < len(arr):\n if arr[x] == 0:\n arr.insert(x, 0)\n arr.pop(-1)\n x+=1\n x += 1",
"def duplicateZeros(self, arr: List[int]) -> None:\n zero = 0\n i = 0\n while i + zero < len(arr):\n if arr[i] == 0:\n zero += 1\n i += 1\n \n if i + zero > len(arr):\n arr[-1] = 0\n i -= 1\n zero -= 1\n \n i -= 1\n j = i + zero\n while j >= 0:\n if arr[i]:\n arr[j] = arr[i]\n else:\n arr[j] = 0\n j -= 1\n arr[j] = 0\n j -= 1\n i -= 1",
"def __init__(self, _v):\n\t\tself.v = copy.deepcopy(_v)\n\t\tself.n = len(self.v)",
"def vec_unit(n, i):\n return jnp.zeros(n).at[i].set(1)",
"def add_unique(self, *args):\n return _ida_hexrays.cinsnptrvec_t_add_unique(self, *args)",
"def dense_vector (n, init_val=0.0):\n return [init_val] * n"
]
| [
"0.5970925",
"0.5745958",
"0.5735644",
"0.5696948",
"0.56464106",
"0.549432",
"0.5441594",
"0.5392188",
"0.53915316",
"0.53656274",
"0.5356167",
"0.5326084",
"0.5323357",
"0.5291507",
"0.5251519",
"0.5247529",
"0.5225927",
"0.5218693",
"0.52077883",
"0.52020216",
"0.5196561",
"0.5183152",
"0.51781815",
"0.51581603",
"0.51564944",
"0.51520187",
"0.5146461",
"0.51429003",
"0.51414794",
"0.51344836"
]
| 0.7371926 | 0 |
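vector_dup_zero above zero-fills dup_number elements by issuing vector_dup with a full mask, capping each instruction at MAX_REPEATS repeats and finishing with a partial-mask tail. A plain-Python sketch of the same decomposition; the MAX_MASK = 128 and MAX_REPEATS = 255 constants are assumed typical fp16 limits and are not taken from this file.

MAX_MASK = 128      # elements handled per repeat (assumed fp16 value)
MAX_REPEATS = 255   # hardware limit on the repeat field (assumed)

def dup_plan(dup_number):
    # Decompose a zero-fill of dup_number elements into (mask, repeats) bursts,
    # mirroring the repeat/tail arithmetic of vector_dup_zero.
    repeats = dup_number // MAX_MASK
    tail = dup_number % MAX_MASK
    plan = []
    for _ in range(repeats // MAX_REPEATS):   # full 255-repeat bursts
        plan.append((MAX_MASK, MAX_REPEATS))
    if repeats % MAX_REPEATS:                 # leftover whole repeats
        plan.append((MAX_MASK, repeats % MAX_REPEATS))
    if tail:                                  # partial final mask
        plan.append((tail, 1))
    return plan

covered = sum(mask * rep for mask, rep in dup_plan(100_000))
assert covered == 100_000
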
Convert a dataframe of indicators into an inverse covariance matrix index | def invcov_index(indicators):
df = indicators.copy()
df = (df-df.mean())/df.std()
I = np.ones(df.shape[1])
E = inv(df.cov())
s1 = I.dot(E).dot(I.T)
s2 = I.dot(E).dot(df.T)
try:
int(s1)
S = s2/s1
except TypeError:
S = inv(s1).dot(s2)
S = pd.Series(S,index=indicators.index)
return S | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invert(self):\r\n return pd.DataFrame(\r\n np.linalg.pinv(self.data, hermitian=True),\r\n index=self.data.index,\r\n columns=self.data.columns,\r\n ) # do not return CategoryCov because variance can be negative\r",
"def information_matrix(self):\n return self._cov.inv()",
"def inverse_transform(self, df):\n return df",
"def build_inverse_covariance(self):\n return np.linalg.inv(self.cov)",
"def incidence_matrix(self):\n try: \n return self._incidence_matrix\n except AttributeError:\n self._incidence_matrix = matrix(ZZ, len(self.Vrepresentation()), \n len(self.Hrepresentation()), 0)\n for V in self.Vrep_generator():\n for H in self.Hrep_generator():\n if self._is_zero(H*V):\n self._incidence_matrix[V.index(),H.index()] = 1\n\n return self._incidence_matrix",
"def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result",
"def get_data_matrix(df):\n return df[[\"Open\", \"High\", 'Low', \"Close\"]].to_numpy()",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n\n if self.discretization in [\n 'sklearn-quantile',\n 'sklearn-uniform',\n 'sklearn-kmeans',\n ]:\n df_index = df.index\n df_colnames = df.columns9\n df = df.clip(upper=self.bin_max, lower=self.bin_min, axis=1)\n df = df.astype(int).clip(lower=0, upper=(self.n_bins - 1))\n df = pd.DataFrame(self.kbins_discretizer.inverse_transform(df))\n df.index = df_index\n df.columns = df_colnames\n return df",
"def _inverse_covariance(spectral_array):\n hsi_img = spectral_array.array_data\n\n n_lines, n_samples, n_band = hsi_img.shape\n n_pixels = n_lines * n_samples\n hsi_data = np.reshape(hsi_img, (n_pixels, n_band), order='F').T\n inverse_covariance = np.linalg.pinv(np.cov(hsi_data.T, rowvar=False))\n\n return inverse_covariance",
"def getInverseMatrix(self) -> CMatrix4:\n ...",
"def inverseTransformationMatrix(self,index=None):\n if self.method == 'pca':\n if index is not None:\n coordinateIndex = distribution1D.vectori_cxx(len(index))\n for i in range(len(index)):\n coordinateIndex[i] = index[i]\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions(coordinateIndex)\n inverseTransformation = self._distribution.getInverseTransformationMatrix(coordinateIndex)\n else:\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions()\n inverseTransformation = self._distribution.getInverseTransformationMatrix()\n row = matrixDim[0]\n column = matrixDim[1]\n # convert 1D vector to 2D array\n L = np.atleast_1d(inverseTransformation).reshape(row,column)\n else:\n self.raiseAnError(NotImplementedError,' inverse transformationMatrix is not yet implemented for ' + self.method + ' method')\n return L",
"def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix",
"def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov",
"def build_vcov_inverse(vcov):\n ivcov = numpy.linalg.inv(vcov)\n return ivcov",
"def _get_vif_table(self):\n\n vif_data = [['']]\n\n exog = self._model.exog\n\n # for variable in self._explanatory_variables:\n for exog_idx in range(1, exog.shape[1]):\n vif = variance_inflation_factor(exog, exog_idx)\n\n vif_data.append([self._FLOAT_STRING_FORMAT.format(vif)])\n\n vif_table = SimpleTable(vif_data, headers=['VIF'])\n\n return vif_table",
"def inverse_transform(self, df):\n invtrans_df = df.copy()\n\n invtrans_df = invtrans_df.where(df <= 0, self.upper_mean * df, axis=1)\n invtrans_df = invtrans_df.where(\n df >= 0, (self.lower_mean * df).abs() * -1, axis=1\n )\n invtrans_df = invtrans_df + self.df_med\n invtrans_df = invtrans_df.where(df != 0, self.df_med, axis=1)\n return invtrans_df",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi",
"def nondimensionalize(df, cols=[], series=None, inverse=False):\n\n if not cols:\n temp_df = df\n else:\n temp_df = df[cols]\n\n if type(series) is pd.Series:\n pass\n elif type(series) is str:\n series = df[series]\n elif series is None:\n series = df['Close']\n\n\n result = temp_df.divide(series, axis=0)\n\n if inverse:\n result = 1 / result\n result.columns = [series.name + ' / ' + name for name in temp_df.columns]\n else:\n result.columns = [name + ' / ' + series.name for name in temp_df.columns]\n\n return out(SETTINGS, df, result)",
"def inverse_transform(self, df, cols_to_inverse=None):\n\n # drop p_id column\n profile_ids = None\n if isinstance(df, pd.DataFrame):\n if self.PROFILE_ID_COL in df:\n profile_ids = df.pop(self.PROFILE_ID_COL)\n arr = df.to_numpy()\n else:\n arr = df\n if profile_ids is None:\n profile_ids = self.tst_df.loc[:, self.PROFILE_ID_COL]\n\n if len(arr.shape) == 1: # make at least 2 dim\n arr = arr.reshape([-1, 1])\n\n if cols_to_inverse is None:\n # assumption: df contains targets only\n assert arr.shape[1] == len(self.y_cols), \\\n f'target mismatch {arr.shape[1]} != {len(self.y_cols)}'\n cols_to_inverse = self.y_cols\n\n # inverse scaling\n if isinstance(self.standardize, bool):\n if self.standardize:\n if cols_to_inverse == self.y_cols and len(self.y_cols) == 4:\n # extra scaling of targets\n arr *= self.target_stds[2] / self.target_stds\n\n orig_scaling_cols = [x for x in self.df.columns if '_bin_' not in\n x and x != self.PROFILE_ID_COL\n and x != 'time']\n inversed = pd.DataFrame(np.zeros((len(df), len(orig_scaling_cols))),\n columns=orig_scaling_cols)\n inversed.loc[:, cols_to_inverse] = arr\n inversed.loc[:, orig_scaling_cols] = \\\n self.scaler.inverse_transform(inversed)\n inversed = inversed.loc[:, cols_to_inverse]\n else:\n if isinstance(df, pd.DataFrame):\n inversed = df\n else:\n inversed = pd.DataFrame(arr, columns=cols_to_inverse)\n else:\n assert isinstance(self.standardize, str), \\\n f'{self.standardize} is neither bool nor str'\n if self.standardize == 'simple':\n temperature_cols = [c for c in cfg.data_cfg['temperature_cols']\n if c in self.x_cols + self.y_cols]\n n_left_pad = len(temperature_cols) - len(self.y_cols)\n inversed = self.scaler_temps.inverse_transform(\n np.hstack([np.zeros((len(arr), n_left_pad)), arr])\n )[:, -len(self.y_cols):]\n inversed = pd.DataFrame(inversed, columns=cols_to_inverse)\n else:\n raise ValueError(f'{self.standardize} not allowed.')\n\n if self.estimate_diffs:\n assert len(inversed) == len(profile_ids), \\\n f'length mismatch {len(inversed)} != {len(profile_ids)}'\n\n inversed[self.PROFILE_ID_COL] = profile_ids\n\n def invert_shift(p_df, p_init_vals):\n p_df = p_df.drop([self.PROFILE_ID_COL], axis=1)\n p_df.iloc[0, :] = p_init_vals\n return p_df.cumsum()\n\n inversed = pd.concat([invert_shift(_df, self.initial_y.loc[p_id, :])\n for p_id, _df in\n inversed.groupby(self.PROFILE_ID_COL)])\n return inversed",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n return df",
"def covariate_to_index(self):\n covariate_df = self.dismod_file.covariate\n return dict(covariate_df[[\"covariate_name\", \"covariate_id\"]].to_records(index=False))",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n df = df * self.center\n return df",
"def build_cov_dataset(self):\n return self.ini_eeg_f[:, :, self.mask_tri].copy()",
"def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result",
"def inverse_transform(self, df, trans_method: str = \"forecast\"):\n if self.on_inverse:\n df = df.round(decimals=self.decimals)\n if self.force_int:\n df = df.astype(int)\n return df",
"def inverse_transform(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values\n\n sin_df = pd.DataFrame()\n # make this faster\n for index, row in self.sin_params.iterrows():\n yy = pd.DataFrame(\n row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],\n columns=[index],\n )\n sin_df = pd.concat([sin_df, yy], axis=1)\n df_index = df.index\n df = df.astype(float).reset_index(drop=True) + sin_df.reset_index(drop=True)\n df.index = df_index\n return df",
"def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)",
"def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I"
]
| [
"0.6422175",
"0.5994541",
"0.59150004",
"0.58663636",
"0.5760582",
"0.5612461",
"0.55810237",
"0.5535483",
"0.55111533",
"0.54344183",
"0.54144865",
"0.5412415",
"0.54084176",
"0.53880584",
"0.538651",
"0.53669155",
"0.5360462",
"0.531783",
"0.5299906",
"0.5290913",
"0.5290913",
"0.5290913",
"0.52855587",
"0.52853465",
"0.5274589",
"0.52553076",
"0.52435",
"0.5205351",
"0.5173295",
"0.5160572"
]
| 0.7347716 | 0 |
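invcov_index above builds a composite index from standardized indicators with weights proportional to the row sums of the inverse covariance matrix, w = 1'Σ⁻¹ / (1'Σ⁻¹1), so each row's score is w·z. A NumPy/pandas sketch of the scalar branch; the synthetic data exists only for the sanity check.

import numpy as np
import pandas as pd

def invcov_composite(df):
    # Standardize the indicators, derive inverse-covariance weights, and
    # return the weighted composite per row (the try-branch of invcov_index).
    z = (df - df.mean()) / df.std()
    ones = np.ones(z.shape[1])
    E = np.linalg.inv(z.cov().to_numpy())
    w = ones @ E / (ones @ E @ ones)          # weights sum to one by construction
    return pd.Series(z.to_numpy() @ w, index=df.index)

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(50, 3)), columns=["a", "b", "c"])
index = invcov_composite(df)
w_sum = (np.ones(3) @ np.linalg.inv(((df - df.mean()) / df.std()).cov().to_numpy()))
assert np.isclose((w_sum / w_sum.sum()).sum(), 1.0)
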
Count the number of pairs with a given sum | def sum_pairs(arr: list, sum: int):
pair_count = 0
count_map = {}
for i in arr:
if i in count_map:
count_map[i] += 1
else:
count_map[i] = 1
for key, value in count_map.items():
if (sum - key) in count_map:
count1 = value
count2 = count_map[sum - key]
if count1 == count2 and count1 > 1:
pair_count += int(count1 * (count1 - 1) / 2)
else:
pair_count += count1 * count2
count_map[key] = 0
count_map[sum - key] = 0
return pair_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def numIdenticalPairs(self, nums: List[int]) -> int:\n #brute force\n # res = 0\n # for i in range(len(nums)):\n # for j in range(len(nums)):\n # if i >= j:\n # continue\n\n # if nums[i] == nums[j]:\n # res += 1\n # return res\n #with memoization\n res = 0\n memo = {}\n \n for n in nums:\n \n if n not in memo:\n memo[n] = 1\n \n else:\n # count number of pairs based on duplicate values\n if memo[n] == 1:\n res += 1\n else:\n res += memo[n]\n \n memo[n] += 1\n \n return res",
"def get_num_pairs(seq):\n n = len(seq)\n return int(n * (n-1)/2) # sum of arphmetic progression (n-1)...1",
"def get_total_identical_pairs(list_of_numbers):\n\n pairs = 0\n\n # Loop through the list of number\n for a in range(len(list_of_numbers)):\n\n # Start from after the first number of the possible pair\n for b in range(a + 1, len(list_of_numbers)):\n\n # Compare the first pair to next possible number pair\n if list_of_numbers[a] == list_of_numbers[b]:\n\n # The first number's index must be less than the second number's index\n if a < b:\n pairs += 1\n\n return pairs",
"def count_same(pairs):\n same_count = 0\n for x, y in pairs:\n if x == y:\n same_count = same_count + 1\n return same_count",
"def countPairs(arr):\n pairs = 0\n for i in range(len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if arr[i] & arr[j] == 0: # log 0 = Infinity...avoid this problem\n pass\n elif int(math.log2(arr[i] & arr[j])) == math.log2(arr[i] & arr[j]):\n print('(', arr[i], '&', arr[j], ')', '=', arr[i] & arr[j],\n '=2^', int(math.log2(arr[i] & arr[j])))\n pairs += 1\n # print('Pairs:', pairs, '\\n')\n print(pairs)\n return pairs",
"def countTriplets1(arr, r):\n from collections import Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range",
"def number_of_pairs(self, state):\n\n pairs = 0\n for i in range(0,len(state),2):\n if state[i] == 1 and state[i+1] == 1:\n pairs += 1\n return pairs",
"def pair_sum(arr,num):\n\t# check if the number entered is much bigger than the elements of the list\n\tlist1 = sorted(arr,reverse=True)\n\tif ((list1[0] + list1[1]) < num):\n\t\treturn False\n\t# Check if the array entered has atleast 2 elements\n\tif (len(arr) < 2):\n\t\treturn False\n\t# Declare two sets : seen for elements not summing up to num\n\t# output is another set in which we push the elements which sum up to num\n\tseen = set()\n\toutput = set()\n\t\n\tfor i in arr:\n\t\ttarget = num - i\n\t\tif target not in seen:\n\t\t\tseen.add(i)\n\t\telse:\n\t\t\toutput.add((min(i,target),max(i,target)))\n\treturn list(output)",
"def count_square_sums(n):\n if n == 0: return 1\n total = 4*( sum(1 for i in divisors(n) if i % 4 == 1) \n - sum(1 for i in divisors(n) if i % 4 == 3) )\n ## Remove duplicate countings if n > 0\n ## Eight duplicates: (+/-a, +/-b) (+/-b, +/-a) \n ## Four duplicates: (0,+1), (0,-1), (+1,0), (-1,0)\n ## Four duplicates: (+/-1,+/-1)\n flg = 0\n if is_square(n): flg += 1\n if is_square(n/2) and (n % 2 == 0): flg += 1\n return (total + 4*flg)/8",
"def taskOfPairing(freq):\n n_pairs = list(map(lambda x: x // 2, freq))\n remainders = list(map(lambda x: x % 2, freq))\n print(freq)\n print(n_pairs)\n print(remainders)\n\n total = sum(n_pairs)\n print(total)\n i = 1\n while i < len(freq):\n print('i', i, remainders[i], remainders[i - 1])\n if remainders[i] + remainders[i - 1] == 2:\n print('total', total, 'i', i)\n total = total + 1\n i = i + 2\n print('newtotal', total, 'i', i)\n else:\n i += 1\n print(total)\n return total",
"def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)",
"def Hashtables__Triplets():\n # URL: https://www.hackerrank.com/challenges/count-triplets-1/problem\n ## Passes all tests\n # O(n) ish.\n # dae9ccff5aea4a8ca6e087a7c16bd70d Notability notes\n from collections import defaultdict\n from dataclasses import dataclass\n\n @dataclass\n class I:\n idx: int\n cnt: int\n\n\n def countTriplets(arr, r):\n d = defaultdict(list)\n prev_count = defaultdict(int) #\n triple_count = 0\n for i, v in enumerate(arr):\n prev = v / r # (!) Integer division can be wrong. 17 // 3 -> 5. This builds incorrect previous (5, 17)\n prev_prev = (prev / r, prev)\n\n if prev_prev in d:\n # cnt = sum([i.cnt for i in d[prev_prev]]) # Counting the whole chain can be O(n) ish. Tests 6,11 fail.\n cnt = prev_count[(prev / r, prev, \"sum\")] # Optimization, keep rolling sum. -> O(1)\n triple_count += cnt\n if prev in d:\n prev_c = len(d[prev]) # O(1)\n d[(prev, v)].append(I(i, prev_c))\n prev_count[(prev, v, \"sum\")] += prev_c # Keep rolling su.\n d[v].append(i)\n\n return triple_count\n\n _, r = [int(i) for i in input().split()]\n arr = [float(i) for i in input().split()]\n print(countTriplets(arr, r))\n\n #### wip entries\n # T (Submission 6) -> (integer devision issue.\n # 100000 3\n # 1 17 80 68 5 5 58 17 38 81 26 44 38 6 12 ...\n # expr: 2325652489\n # Act : 667065187 << wrong, under count.\n # ac2 : 19107507001 << wrong, over count. (integer devision issue.\n # ac3: 2325652489",
"def fast(pairs):\n # {{{\n teams = np.unique(pairs[:, 0])\n out = np.zeros((teams.size, 2))\n for i, team in enumerate(teams):\n num = pairs[pairs[:, 0] == team][:, 1].sum()\n out[i, 0], out[i, 1] = team, num\n return out\n # }}}",
"def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets",
"def get_sum_zero_pairs(numbers):\n numbers = set(numbers)\n numbers = list(numbers)\n pairs_that_add_to_zero = []\n\n for i, item in enumerate(numbers):\n if numbers[i] == len(numbers):\n break\n\n if numbers[i] == 0:\n pairs_that_add_to_zero.append([0, 0]) \n\n for j in range(i+1, len(numbers)):\n total_of_two_items = numbers[i] + numbers[j]\n if (total_of_two_items == 0):\n pairs_that_add_to_zero.append([numbers[i], numbers[j]]) \n\n return pairs_that_add_to_zero",
"def pair_sum(list1, x):\n\n for y in list1:\n for m in list1:\n if y+m==x:\n difference=abs(y-m)\n return difference",
"def count_pairs(idx, bit_up_a, bit_up_b, bit_up_c, mem):\n\n global a\n global b\n global c\n\n if idx == -1:\n return 1\n\n if (idx, bit_up_a, bit_up_b, bit_up_c) in mem:\n return mem[(idx, bit_up_a, bit_up_b, bit_up_c)]\n\n count = 0\n mask = 1 << idx\n\n for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:\n\n k = i ^ j\n\n if bit_up_a:\n if not (a & mask) and i == 1:\n continue\n elif (a & mask) and i == 0:\n next_a = 0\n else:\n next_a = 1\n else:\n next_a = 0\n\n if bit_up_b:\n if not (b & mask) and j == 1:\n continue\n elif (b & mask) and j == 0:\n next_b = 0\n else:\n next_b = 1\n else:\n next_b = 0\n\n if bit_up_c:\n if not (c & mask) and k == 1:\n continue\n elif (c & mask) and k == 0:\n next_c = 0\n else:\n next_c = 1\n else:\n next_c = 0\n\n count += count_pairs(idx - 1, next_a, next_b, next_c, mem)\n\n mem[(idx, bit_up_a, bit_up_b, bit_up_c)] = count\n return count",
"def countTriplets2(arr, r):\n from collections import Counter\n from math import factorial\n\n # If the ratio 'r' is 1 then this is a special case of combinations\n if r == 1:\n n = len(arr)\n r = 3\n return factorial(n)//(factorial(r)*factorial(n-r))\n\n arr_dict = Counter()\n ratio_range = []\n index = 0\n counter = 0\n triplets = 0\n \n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n max_arr_dict = max(arr_dict)\n\n # With the 1 special case removed, there now cannot be triplets if there are not 3 items in the dict\n if len(arr_dict) < 3: return triplets\n \n # There is now the potential for triplets so build all possible values\n while index < max_arr_dict:\n index = r**counter\n ratio_range.append(index)\n counter += 1\n if ratio_range[-1] > max_arr_dict: ratio_range.pop(-1)\n\n for y in range(len(ratio_range)-2):\n firstkey = ratio_range[y]\n secondkey = ratio_range[y+1]\n thirdkey = ratio_range[y+2]\n \n # If there are no triplets then the loop will exit without incrementing triplets \n if firstkey not in arr_dict or secondkey not in arr_dict or thirdkey not in arr_dict: \n continue\n else:\n firstvalue = arr_dict[firstkey]\n secondvalue = arr_dict[secondkey]\n thirdvalue = arr_dict[thirdkey]\n \n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n triplets += triplet_count\n\n return triplets",
"def sum_finder(nums, sum_wanted):\r\n\r\n for i, ni in enumerate(nums):\r\n\r\n for x, nx in enumerate(nums[i+1:]):\r\n\r\n if ni + nx == sum_wanted:\r\n print(\"Yes\", ni, \"and\", nx, \"=\", sum_wanted)\r\n else:\r\n print(ni, \"and\", nx, \"=\", \"No match\")",
"def count_pairs(assignments, v1, v2, M):\n assert v1 != v2\n pairs = assignments[:, v1].astype(np.int32) * M + assignments[:, v2]\n return np.bincount(pairs, minlength=M * M).reshape((M, M))",
"def _count_concordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)",
"def find_valid_pairs(self, nums, index):\n left, right = 0, index - 1\n count = 0\n while left < right:\n if nums[left] + nums[right] > nums[index]:\n count += right - left\n right -= 1\n else:\n left += 1\n\n return count",
"def countTriplets(x, n):\n x = tuple(x)\n if not x or not n: return\n return deep(x, n, 0, None)",
"def count_pairs(clusters_list, cluster_labels):\n algorithm_pairs = 0\n intersecting_pairs = 0\n for points_in_cluster in clusters_list:\n algorithm_pairs += (len(points_in_cluster)**2 - len(points_in_cluster)) / 2\n for pair in itertools.combinations(points_in_cluster, 2):\n if cluster_labels[pair[0]] == cluster_labels[pair[1]]:\n intersecting_pairs += 1\n return algorithm_pairs, intersecting_pairs",
"def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))",
"def countPoints(self,sumation):\n if sumation == 21:\n points = 7\n elif sumation == 20:\n points = 5\n elif sumation == 19:\n points = 4\n elif sumation == 18:\n points = 3\n elif sumation == 17:\n points = 2\n elif sumation <=16:\n points = 1\n else:\n points = 0\n return points",
"def countTriplets(ratio, arr):\n primary_elements = {}\n secondary_elements = {}\n triplet_count = 0\n for val in arr:\n if val % ratio == 0:\n # If val completes triplet\n triplet_count += secondary_elements.get(val / ratio, 0)\n # If val completes doublet\n if primary_elements.get(val / ratio):\n if secondary_elements.get(val):\n secondary_elements[val] += \\\n primary_elements.get(val / ratio)\n else:\n secondary_elements[val] = \\\n primary_elements.get(val / ratio)\n\n # Single val\n if primary_elements.get(val):\n primary_elements[val] += 1\n else:\n primary_elements[val] = 1\n\n return triplet_count",
"def violations(alist):\n count = 0\n for k in alist:\n add = False\n for i in helpers.list_combinations(k,2):\n if abs(i[0]-i[1]) < 2:\n add = True\n if add:\n count += 1\n\n return count",
"def _count_discordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_discordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)",
"def doubles(counts):\n return (counts==2).sum()"
]
| [
"0.6868359",
"0.66832805",
"0.66112846",
"0.66064453",
"0.65179956",
"0.6467897",
"0.64248705",
"0.6424355",
"0.63199806",
"0.63117194",
"0.62766564",
"0.62698525",
"0.6211663",
"0.62083685",
"0.61964554",
"0.61943144",
"0.6185799",
"0.61598265",
"0.61551404",
"0.61480534",
"0.6104004",
"0.60554093",
"0.60389197",
"0.60306895",
"0.60167545",
"0.6007799",
"0.6005184",
"0.5997349",
"0.59929055",
"0.59811497"
]
| 0.83935183 | 0 |
Archive the provided URL using archive.org's Wayback Machine. Returns the archive.org URL where the capture is stored. Raises a CachedPage exception if archive.org declines to conduct a new capture and returns a previous snapshot instead. To silence that exception, pass True to the `accept_cache` keyword argument. By default the request is anonymous. Pass the `authenticate` flag to log in the request. It will use the `SAVEPAGENOW_ACCESS_KEY` and `SAVEPAGENOW_SECRET_KEY` environment variables to authenticate the request. (A usage sketch follows after this record.) | def capture(
target_url: str,
user_agent: str = DEFAULT_USER_AGENT,
accept_cache: bool = False,
authenticate: bool = False,
):
# Put together the URL that will save our request
domain = "https://web.archive.org"
save_url = urljoin(domain, "/save/")
request_url = save_url + target_url
# Access Keys for Internet Archive API
if authenticate:
access_key = os.getenv("SAVEPAGENOW_ACCESS_KEY")
secret_key = os.getenv("SAVEPAGENOW_SECRET_KEY")
try:
assert access_key and secret_key
except AssertionError:
raise ValueError(
"You must set SAVEPAGENOW_ACCESS_KEY and SAVEPAGENOW_SECRET_KEY environment variables to use the authenticate flag"
)
headers = {
"Accept": "application/json",
"User-Agent": user_agent,
"Authorization": f"LOW {access_key}:{secret_key}",
"Content-Type": "application/x-www-form-urlencoded",
}
else:
headers = {
"User-Agent": user_agent,
}
# Make the request
response = requests.get(request_url, headers=headers)
# If it has an error header, raise that.
has_error_header = "X-Archive-Wayback-Runtime-Error" in response.headers
if has_error_header:
error_header = response.headers["X-Archive-Wayback-Runtime-Error"]
if error_header == "RobotAccessControlException: Blocked By Robots":
raise BlockedByRobots("archive.org returned blocked by robots.txt error")
else:
raise WaybackRuntimeError(error_header)
# If it has an error code, raise that
status_code = response.status_code
if status_code == 401:
raise Unauthorized("Your archive.org access key and/or secret is not valid")
elif status_code == 403:
raise Forbidden(response.headers)
elif status_code == 429:
raise TooManyRequests(response.headers)
elif status_code == 502:
raise BadGateway(response.headers)
elif status_code == 520:
raise UnknownError(response.headers)
# If there's a content-location header in the response, we will use that.
try:
content_location = response.headers["Content-Location"]
archive_url = domain + content_location
except KeyError:
# If there's not, we will try to parse out a Link header, which is another style they use.
try:
# Parse the Link tag in the header, which points to memento URLs in Wayback
header_links = parse_header_links(response.headers["Link"])
archive_obj = [h for h in header_links if h["rel"] == "memento"][0]
archive_url = archive_obj["url"]
except Exception:
# If neither of those things works throw this error.
raise WaybackRuntimeError(
dict(status_code=response.status_code, headers=response.headers)
)
# Determine if the response was cached
cached = (
"X-Page-Cache" in response.headers and response.headers["X-Page-Cache"] == "HIT"
)
# If it was cached ...
if cached:
# .. and we're not allowing that
if not accept_cache:
# ... throw an error
msg = f"archive.org returned a cache of this page: {archive_url}"
raise CachedPage(msg)
# Finally, return the archived URL
return archive_url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cli(\n url: str,\n user_agent: str | None = None,\n accept_cache: bool = False,\n authenticate: bool = False,\n):\n kwargs: dict[str, typing.Any] = {}\n if user_agent:\n kwargs[\"user_agent\"] = user_agent\n if accept_cache:\n kwargs[\"accept_cache\"] = accept_cache\n if authenticate:\n kwargs[\"authenticate\"] = authenticate\n archive_url = capture(url, **kwargs)\n click.echo(archive_url)",
"def archive_url(url: str) -> Tuple[str, bool]:\n internet_archive = InternetArchive()\n return internet_archive.archive_page(url)",
"def capture_or_cache(\n target_url: str,\n user_agent: str = DEFAULT_USER_AGENT,\n authenticate: bool = False,\n):\n try:\n return (\n capture(\n target_url,\n user_agent=user_agent,\n accept_cache=False,\n authenticate=authenticate,\n ),\n True,\n )\n except CachedPage:\n return (\n capture(\n target_url,\n user_agent=user_agent,\n accept_cache=True,\n authenticate=authenticate,\n ),\n False,\n )",
"def web_archive():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n return flask.render_template('archive.html', user = flask.session['user'],\n archives = db_get_archives())",
"def archivelink(self, archive_format, ref=None):\r\n url = '{0}/{1}'.format(self.parent.get_url(), archive_format)\r\n if ref:\r\n url = '{0}/{1}'.format(url, ref)\r\n\r\n return http.Request('GET', url), parse_redirect",
"def add_archive_to_cache(self, required_digest, data, url, extract = None, type = None, start_offset = 0):\n\t\tself._write_store(lambda store, **kwargs: store.add_archive_to_cache(required_digest,\n\t\t\t\t\t\tdata, url, extract, type = type, start_offset = start_offset, **kwargs))",
"def getArchive(conf):\n url = conf.get('url')\n maxreq = conf.getint('maxrequests', 10)\n maxq = conf.getint('maxquery')\n \n proxy=Proxy(url, limit=maxreq, qlimit=maxq)\n proxy.connectTimeout=3.0\n\n info = proxy.callRemote('archiver.info').addErrback(_connerror)\n archs= proxy.callRemote('archiver.archives').addErrback(_connerror)\n X = yield defer.DeferredList([info, archs], fireOnOneErrback=True).addErrback(_connerror)\n info, archs = X[0][1], X[1][1]\n \n defer.returnValue(Archive(proxy, conf, info, archs))",
"def fetch_all_snapshots(archive_dir, wayback_filename, target_url):\n # Read the list of snapshots.\n with open(wayback_filename) as f:\n data = f.read()\n\n url_template = \"http://web.archive.org/web/{timestamp}/{target_url}\"\n snapshots = data.split(\"\\n\")\n pages_downloaded = 0\n pages_failed = 0\n pages_skipped = 0\n for snapshot in snapshots:\n fields = snapshot.split()\n if len(fields) < 1:\n print(\"Bad fields. End of data?\")\n break\n date_string = fields[1]\n assert 14 == len(date_string)\n ymd = date_string[:8]\n year = int(date_string[:4])\n month = int(date_string[4:6])\n day = int(date_string[6:8])\n assert 1900 < year < 2100 and 1 <= month <= 12 and 1 <= day <=31\n date_of_fire = datetime.date(year,month, day)\n filename = F\"firedata_{year}_{month:02}_{day:02}.html\"\n path = os.path.join(archive_dir, filename)\n if os.path.exists(path):\n print(\"Not replacing \", path)\n pages_skipped += 1\n continue\n else:\n print(\"Downloading for \", path)\n url = url_template.format(timestamp=date_string, target_url=target_url)\n print(url)\n\n page = fetch(url)\n if page is None:\n print(\"Fetching above url failed.\")\n pages_failed +=1\n continue\n\n pages_downloaded += 1\n with open(path, \"wb\") as f:\n f.write(page)\n print(\"Page saved\")\n sleep(2)\n return pages_downloaded, pages_failed, pages_skipped",
"def download_project_archive(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n filename = project.create_downloadable_archive()\n file_handle = open(filename, \"rb\")\n response = FileResponse(file_handle)\n\n response[\"Content-Length\"] = os.path.getsize(filename)\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{}.zip\"'.format(project.name)\n\n return response\n else:\n raise PermissionDenied",
"def get_archive(*args, **kwargs):\n return get_archive_async(*args, **kwargs).get_result()",
"def create_archive(response, url):\n\n\traw_content_type = response.headers['content-type']\n\tcontent_type_result = mimetype.parse(raw_content_type)\n\n\tif content_type_result.is_err( ):\n\t\treturn content_type_result\n\telse:\n\n\t\tcontent_type = content_type_result.from_ok( )\n\t\tmime = content_type['type'] + '/' + content_type['subtype']\n\n\t\tif mimetype.is_html(mime):\n\n\t\t\treturn Archive('', 'application/pdf')\n\n\t\t\t#content = download_website(url) # -- avoid redownloading!\n\n\t\telse:\n\n\t\t\treturn Archive(response.body, raw_content_type)",
"def archive(self, step_name, isolate_server=None):\n isolate_server = isolate_server or self._api.isolated.isolate_server\n cmd = [\n 'archive',\n '-verbose',\n '-isolate-server', isolate_server,\n '-namespace', self._api.isolated.namespace,\n '-dump-hash', self._api.raw_io.output_text(),\n ]\n for f in self._files:\n cmd.extend(['-files', self._isolated_path_format(f)])\n for d in self._dirs:\n cmd.extend(['-dirs', self._isolated_path_format(d)])\n isolated_hash = self._api.isolated._run(\n step_name,\n cmd,\n step_test_data=self._api.isolated.test_api.archive,\n ).raw_io.output_text\n q = {\n 'hash': isolated_hash,\n 'namespace': self._api.isolated.namespace,\n }\n self._api.step.active_result.presentation.links['isolated UI'] = (\n '%s/browse?%s' % (isolate_server, urllib.urlencode(q))\n )\n return isolated_hash",
"async def puppeteer_screenshot(archive_id, url_id, date, url, pics_out_path, timeout_duration):\n\n browser = await launch()\n page = await browser.newPage()\n await page.setViewport({'height': 768, 'width': 1024})\n await page.goto(url, timeout=(int(timeout_duration) * 1000))\n await page.screenshot(path='{0}{1}.{2}.{3}.png'.format(pics_out_path, archive_id, url_id, date))\n await browser.close()",
"def take_screenshot(archive_id, url_id, date, url, pics_out_path, screenshot_method, timeout_duration):\n\n return_code = check_site_availability(url)\n if return_code != 200 and return_code != 302:\n return return_code\n\n # command which takes the screenshots\n command = \"\"\n if screenshot_method == 0:\n command = \"timeout {5}s google-chrome --headless --hide-scrollbars --disable-gpu --noerrdialogs \" \\\n \"--enable-fast-unload --screenshot={0}{1}.{2}.{3}.png --window-size=1024x768 '{4}'\"\\\n .format(pics_out_path, archive_id, url_id, date, url, timeout_duration)\n\n elif screenshot_method == 2:\n command = \"timeout {5}s xvfb-run --server-args=\\\"-screen 0, 1024x768x24\\\" \" \\\n \"cutycapt --url='{0}' --out={1}{2}.{3}.{4}.png --delay=2000\"\\\n .format(url, pics_out_path, archive_id, url_id, date, timeout_duration)\n\n elif screenshot_method == 1:\n try:\n asyncio.get_event_loop().run_until_complete(\n puppeteer_screenshot(archive_id, url_id, date, url, pics_out_path, timeout_duration))\n logging.info(\"Screenshot successful\")\n print(\"Screenshot successful\")\n return 200\n except errors.TimeoutError as e:\n print(e)\n logging.info(e)\n return -1\n except errors.NetworkError as e:\n print(e)\n logging.info(e)\n return -2\n except errors.PageError as e:\n print(e)\n logging.info(e)\n return -3\n except Exception as e:\n print(e)\n return -4\n else:\n pass # assumes the user entered 0,1,2 as method\n\n try:\n if os.system(command) == 0:\n succeed = 200\n logging.info(\"Screenshot successful\")\n else:\n logging.info(\"Screenshot unsuccessful\")\n succeed = -5\n except:\n logging.info(\"Screenshot unsuccessful\")\n succeed = -6\n time.sleep(1) # xvfb needs time to rest\n\n return str(succeed)",
"def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))",
"def __call__(self, request):\n # If request is already in the archive, return the archived response.\n if request in self.http_archive:\n logging.debug('Repeated request found: %s', request)\n response = self.http_archive[request]\n else:\n response = self.real_http_fetch(request)\n if response is None:\n return None\n self.http_archive[request] = response\n if self.inject_script:\n response = _InjectScripts(response, self.inject_script)\n logging.debug('Recorded: %s', request)\n return response",
"def extract_web_archive(cls, url, apath, ffilter=[]):\n\n if apath not in cls._archives.keys():\n download(url, apath)\n\n _files = extract(apath, ffilter=ffilter)\n\n return _files",
"def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name",
"def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:\n # Get the upload policy\n policy = snapshots_storage().generate_post_policy(path)\n url = policy.get(\"url\") if policy else None\n secrets = policy.get(\"fields\") if policy else None\n\n return Job.objects.create(\n project=self,\n creator=user,\n method=JobMethod.archive.name,\n params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),\n secrets=secrets,\n description=f\"Archive project '{self.name}'\",\n **callback,\n )",
"def get_a_bing_archive_wallpaper_remote():\r\n \r\n logging.debug('get_a_bing_archive_wallpaper_remote()')\r\n\r\n now = datetime.datetime.now()\r\n url = \"https://bingwallpaper.anerg.com/de/{}\".format(now.strftime('%Y%m'))\r\n\r\n # get image url\r\n if use_proxy:\r\n \r\n response = requests.get(url, proxies=proxies, timeout=15, verify=False)\r\n else:\r\n response = requests.get(url)\r\n match = re.findall('.*src=\\\"([^\\\"]*\\.jpg)\\\".*', response.text)\r\n for i in range(0, len(match)):\r\n full_image_url = \"https:{}\".format(match[i])\r\n \r\n # image's name\r\n image_name = get_generated_image_name(full_image_url)\r\n \r\n # Check and maintain DB\r\n if not exists_image_in_database(full_image_url) and i+1 < len(match):\r\n add_image_to_database(full_image_url, image_name, \"bingarchive\")\r\n # download and save image\r\n full_image_path = download_image(full_image_url, image_name)\r\n update_image_in_database(full_image_url, full_image_path)\r\n\r\n # Return full path to image\r\n logging.debug('get_a_bing_archive_wallpaper_remote - full_image_path = {}'.format(full_image_path))\r\n return full_image_path\r\n elif i+1 == len(match):\r\n full_image_path = get_image_path_from_database(full_image_url)\r\n\r\n # Return full path to image\r\n logging.debug('get_a_bing_archive_wallpaper_remote - full_image_path = {}'.format(full_image_path))\r\n return full_image_path",
"def get_page_and_store(url, cache_path=None):\n page = urllib2.urlopen(url).read()\n\n if cache_path is not None:\n open(cache_path, 'w').write(page)\n\n return page",
"def extract_web_archive(url, apath, ffilter=[]):\n\n download(url, apath)\n output_files = extract(apath, ffilter=ffilter)\n\n return output_files",
"def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()",
"def wrapped_tarball(export_context, context):\n result = export_result_dict(export_context)\n RESPONSE = context.REQUEST.RESPONSE\n RESPONSE.setHeader('Content-type', 'application/x-gzip')\n RESPONSE.setHeader('Content-disposition',\n 'attachment; filename=%s' % result['filename'])\n return result['tarball']",
"def archive(self, request, pk=None, **kwargs):\n goal = self.get_object()\n # If I'm an adviser or the goal is unsupervised,\n # archive the goal immediately.\n sr = SupportRequest.get_current(request, as_obj=True)\n # check helped user instead if support request is active\n user, sr_id = (sr.user, sr.id) if sr else (request.user, None)\n if not goal.account.supervised or user.is_advisor:\n check_state(Goal.State(goal.state),\n [Goal.State.ACTIVE, Goal.State.ARCHIVE_REQUESTED])\n Event.ARCHIVE_GOAL.log('{} {}'.format(request.method,\n request.path),\n user=request.user, obj=goal,\n support_request_id=sr_id)\n # Set the state to archive requested,\n # as the call to archive() requires it.\n goal.state = Goal.State.ARCHIVE_REQUESTED.value\n goal.archive()\n else:\n # I'm a client with a supervised goal, just change the status to\n # ARCHIVE_REQUESTED, and add a notification\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n Event.ARCHIVE_GOAL_REQUESTED.log('{} {}'.format(request.method,\n request.path),\n user=request.user, obj=goal,\n support_request_id=sr_id)\n # Flag the goal as archive requested.\n goal.state = Goal.State.ARCHIVE_REQUESTED.value\n # TODO: Add a notification to the advisor that the goal is archive requested.\n goal.save()\n return Response(serializers.GoalSerializer(goal).data)",
"def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n\n dir = 'snapshots/' + args.name\n Path(dir).mkdir(parents=True, exist_ok=True)\n\n session = sql_session(dir)\n today = session.query(Sunrise).filter_by(date=date.today()).first()\n if today is None:\n today = add_sunrise(session, args.name, args.latitude, args.longitude)\n now = datetime.utcnow()\n username, password = password_from_netrc(args.url)\n\n if args.force == True or (now > today.sunrise_time and now < today.sunset_time):\n where = tzwhere.tzwhere()\n timezone_str = where.tzNameAt(float(args.latitude), float(args.longitude))\n local_timezone = pytz.timezone(timezone_str)\n utcnow = now.replace(tzinfo=pytz.utc)\n local_now = utcnow.astimezone(local_timezone)\n path = local_now.strftime('%Y%m%d-%H%M%S%z') + '_' + args.name + '.jpg'\n\n r = requests.get(args.url, auth=HTTPDigestAuth(username, password), verify=False, stream=True)\n if r.status_code == requests.codes.ok:\n with open(dir + '/' + path, 'wb') as f:\n for chunk in r.iter_content(4096):\n f.write(chunk)",
"def get_archive_async(\n hostname, project, treeish, dir_path=None, **fetch_kwargs):\n _validate_args(hostname, project, treeish, dir_path)\n dir_path = (dir_path or '').strip('/')\n if dir_path:\n dir_path = '/%s' % dir_path\n return gerrit.fetch_async(\n hostname,\n '%s/+archive/%s%s.tar.gz' % _quote_all(project, treeish, dir_path),\n **fetch_kwargs)",
"def archive(mongo_backup_file):\r\n filename = get_archive_filename()\r\n tar = tarfile.open(filename, \"w|gz\")\r\n tar.add(mongo_backup_file)\r\n tar.close()\r\n\r\n return filename",
"def fetch(self) -> None:\n archive_path = os.path.join(self._output_dir, self._archive_name)\n self._download_file(self._parsed_url.original_url, archive_path)\n try:\n with zipfile.ZipFile(archive_path, \"r\") as zip_file:\n zip_file.extractall(path=self._output_dir)\n except zipfile.BadZipfile:\n raise REANAFetcherError(\"The provided zip file is not valid\")\n\n os.remove(archive_path)\n\n if not self._discover_workflow_specs():\n top_level_entries = [\n os.path.join(self._output_dir, entry)\n for entry in os.listdir(self._output_dir)\n ]\n # Some zip archives contain a single directory with all the files.\n if len(top_level_entries) == 1 and os.path.isdir(top_level_entries[0]):\n top_level_dir = top_level_entries[0]\n # Move all entries inside the top level directory\n # to the output directory.\n for entry in os.listdir(top_level_dir):\n shutil.move(os.path.join(top_level_dir, entry), self._output_dir)\n os.rmdir(top_level_dir)",
"def isolate(self, isolate_path):\n dump_json = os.path.join(self._tmpdir, 'digest.json')\n cmd = [\n ISOLATE_CLI,\n 'archive',\n '-cas-addr',\n self._cas_addr,\n '-i',\n isolate_path,\n '-dump-json',\n dump_json,\n '-log-level',\n 'debug',\n ]\n logging.debug('SwarmingClient.isolate: executing command. %s', cmd)\n with open(self._rotate_logfile(), 'wb') as f:\n f.write('\\nRunning: %s\\n' % ' '.join(cmd))\n p = subprocess.Popen(cmd, stdout=f, stderr=f)\n p.communicate()\n assert p.returncode == 0, ('Failed to isolate files. exit_code=%d, cmd=%s' %\n (p.returncode, cmd))\n with open(dump_json) as f:\n data = json.load(f)\n digest = data.values()[0]\n logging.debug('CAS digest = %s', digest)\n return digest"
]
| [
"0.687109",
"0.5586112",
"0.557555",
"0.53966314",
"0.53348285",
"0.5196314",
"0.5022589",
"0.4892101",
"0.48886994",
"0.48691416",
"0.4849584",
"0.4844973",
"0.47459376",
"0.4710222",
"0.4675755",
"0.46476734",
"0.46368256",
"0.46331286",
"0.46284184",
"0.4627435",
"0.46157935",
"0.46024883",
"0.4585167",
"0.45318258",
"0.4530921",
"0.4497275",
"0.44615296",
"0.44388375",
"0.44331467",
"0.44295847"
]
| 0.77938116 | 0 |
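A minimal usage sketch for the `capture` helper documented in the record above. This is illustrative only and not part of the dataset row: the import paths, the example URL, and the placeholder credentials are assumptions; the keyword arguments follow the signature shown in the record.

```python
# Hypothetical usage of capture(); import paths and credentials are placeholders.
import os

from savepagenow import capture  # assumed import location
from savepagenow.exceptions import CachedPage  # assumed exception location

# Required only when calling with authenticate=True.
os.environ["SAVEPAGENOW_ACCESS_KEY"] = "your-access-key"  # placeholder
os.environ["SAVEPAGENOW_SECRET_KEY"] = "your-secret-key"  # placeholder

try:
    # Ask archive.org for a fresh capture; CachedPage is raised if a cached
    # snapshot comes back and accept_cache is left at its default (False).
    archive_url = capture("https://example.com/", authenticate=True)
except CachedPage:
    # Fall back to accepting the cached snapshot.
    archive_url = capture("https://example.com/", accept_cache=True, authenticate=True)

print(archive_url)
```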
Test readout error on qubit 0 for bell state | def test_readout_error_qubit0(self):
# Test circuit: ideal bell state
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
# Ensure qubit 0 is measured before qubit 1
circuit.barrier(qr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
backend = QasmSimulator()
# Asymmetric readout error on qubit-0 only
probs_given0 = [0.9, 0.1]
probs_given1 = [0.3, 0.7]
noise_model = NoiseModel()
noise_model.add_readout_error([probs_given0, probs_given1], [0])
shots = 2000
target = {
'0x0': probs_given0[0] * shots / 2,
'0x1': probs_given0[1] * shots / 2,
'0x2': probs_given1[0] * shots / 2,
'0x3': probs_given1[1] * shots / 2
}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_corruptedbit(self):\n self.assertRaises(ValueError, two_out_five, '1100000111') #Too many 1s must raise a ValueError!\n self.assertRaises(ValueError, two_out_five, '1100000100') #Too many 0s must raise a ValueError!",
"def isReadError(self):\n return self.f5 is 'x'",
"def test_5_false(self):\n\t\tself.spawn(\"./quidditch\").stdin(\"5\").stdin(\"0\").stdout(\"50\\n\").exit(0)",
"def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_readout_error_correlated_2qubit(self):\n # Test circuit: prepare all plus state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr)\n circuit.barrier(qr)\n # We will manually add a correlated measure operation to\n # the assembled qobj\n backend = QasmSimulator()\n\n # Correlated 2-qubit readout error\n probs_given00 = [0.3, 0, 0, 0.7]\n probs_given01 = [0, 0.6, 0.4, 0]\n probs_given10 = [0, 0, 1, 0]\n probs_given11 = [0.1, 0, 0, 0.9]\n probs_noise = [\n probs_given00, probs_given01, probs_given10, probs_given11\n ]\n noise_model = NoiseModel()\n noise_model.add_readout_error(probs_noise, [0, 1])\n\n # Expected counts\n shots = 2000\n probs_ideal = [0.25, 0.25, 0.25, 0.25]\n p00 = sum([\n ideal * noise[0] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p01 = sum([\n ideal * noise[1] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p10 = sum([\n ideal * noise[2] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p11 = sum([\n ideal * noise[3] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n # Add measure to qobj\n item = measure_instr([0, 1], [0, 1])\n append_instr(qobj, 0, item)\n # Execute\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)",
"def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass",
"def test_bit_driver_error(self):\n\n with pytest.raises(ValueError, match=r\"'b' must be either 0 or 1\"):\n qaoa.bit_driver(range(3), 2)",
"def test_check_failed_highstate(self):\n self.assertEqual(self.checkredis.check_failed_highstate(\"aw1-php70-qa\", \"01\"), False)",
"def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != -1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_missingbit(self):\n self.assertRaises(ValueError, two_out_five, '110000011')\n self.assertRaises(ValueError, two_out_five, '110000010')",
"def test_invalid_output(self):\n b1 = Block()\n self.configure_block(b1, {})\n b1.notify_signals([Signal()], \"invalid_output\")\n self.assert_num_signals_notified(1, b1, \"invalid_output\")",
"def test_block_bad_state(self):\n pass",
"def test_read_EOF(demo_data):\n\n openeeg = openEDF(demo_data)\n start = max(openeeg.header.samples) + 1\n arr = openeeg.read(start, start+100)\n assert arr.size == 0\n\n openeeg.close()",
"def test_errors_on_output(self):\n mb = self.maria_backup\n\n # normal run\n errors = b\"\"\"\n 220309 11:19:09 Finished backing up non-InnoDB tables and files\n 220309 11:19:09 Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...\n xtrabackup: The latest check point (for incremental): '92134324'\n xtrabackup: Stopping log copying thread..\n 220309 11:19:10 >> log scanned up to (900123121)\n 220309 11:19:10 Executing UNLOCK TABLES\n 220309 11:19:10 All tables unlocked\n 220309 11:19:10 Backup created in directory '/a/dir'\n 220309 11:19:10 [00] Writing backup-my.cnf\n 220309 11:19:10 [00] ...done\n 220309 11:19:10 [00] Writing xtrabackup_info\n 220309 11:19:10 [00] ...done\n xtrabackup: Transaction log of lsn (89423125) to (900123121) was copied.\n 220309 11:19:10 completed OK!\n \"\"\"\n self.assertFalse(mb.errors_on_output(b'', errors))\n\n # failed run\n errors = b\"\"\"\n xtrabackup: error: log block numbers mismatch:\n xtrabackup: error: expected log block no. 293842034, but got no. 13324598 from the log file.\n xtrabackup: error: it looks like InnoDB log has wrapped around before xtrabackup\n could process all records due to either log copying being too slow, or log files being too small.\n xtrabackup: Error: xtrabackup_copy_logfile() failed\n \"\"\"\n self.assertTrue(mb.errors_on_output(b'', errors))",
"def test_ErrorProduce(self):\n samplefastQ=iter(self.fastQ)\n Newline=errorproducer(samplefastQ)\n self.assertNotEqual(Newline, self.fastQ)\n #uses very high phred score to make sure that errors are produced and the two datasets are not equal",
"def test_is_information_written_through_stderr_methods(self):\n\n io = BufferedSystemIO()\n io._stdout = lambda *args, **kwargs: None\n\n try:\n raise IndexError('Invalid index 5')\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('IndexError', io.get_value())\n self.assertIn('Invalid index 5', io.get_value())\n self.assertIn('Retry with \"-rl debug\" switch before failed task to see stacktrace', io.get_value())",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_failed_elastic():\n test_file = os.path.join(DATA_DIR, 'failed_elastic.out')\n\n with pytest.raises(CRYSTOUT_Error) as ex:\n CRYSTOUT(test_file)\n assert 'Inadequate elastic calculation' in ex.msg",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)",
"def test_port_get_data_error(self):\n test_sensor_error_data = {\n \"sensorid\": int(self.test_sens_data['sensorid']),\n \"error\": \"Exception\",\n \"code\": 1,\n \"message\": \"Port check failed. See log for details\"\n }\n self.test_port.get_data(self.test_sens_data, self.test_out_queue)\n assert_equal(self.test_out_queue.get(), test_sensor_error_data)",
"def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)"
]
| [
"0.761526",
"0.7077091",
"0.62291527",
"0.6159507",
"0.6138393",
"0.61096",
"0.6094899",
"0.6089263",
"0.6088242",
"0.6083451",
"0.60297614",
"0.58834726",
"0.5879146",
"0.58698696",
"0.5863989",
"0.58603543",
"0.5845688",
"0.5740765",
"0.5725579",
"0.57230467",
"0.5655644",
"0.5654308",
"0.5643549",
"0.5639753",
"0.56156784",
"0.56103057",
"0.5606492",
"0.55789703",
"0.55544287",
"0.55529314"
]
| 0.7693814 | 0 |
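A short worked check of the expected-count arithmetic used in the qubit-0 readout-error test above (the qubit-1 test in the next record is analogous with the roles of the two classical bits swapped). This sketch is not part of the dataset; it is plain Python, and bit strings are written in the test's c1c0 order.

```python
# Illustrative check: push the ideal Bell distribution through a readout
# confusion matrix acting on qubit 0 only (qubit 1 is read out perfectly).
probs_given0 = [0.9, 0.1]  # P(read 0 | true 0), P(read 1 | true 0)
probs_given1 = [0.3, 0.7]  # P(read 0 | true 1), P(read 1 | true 1)
confusion = [probs_given0, probs_given1]

ideal = {"00": 0.5, "11": 0.5}  # Bell state, bit order c1c0
shots = 2000

expected = {}
for true_bits, p in ideal.items():
    true_q0 = int(true_bits[1])  # rightmost bit is qubit 0
    for read_q0 in (0, 1):
        outcome = true_bits[0] + str(read_q0)
        expected[outcome] = expected.get(outcome, 0.0) + p * confusion[true_q0][read_q0] * shots

print(expected)
# {'00': 900.0, '01': 100.0, '10': 300.0, '11': 700.0}
# i.e. the test's target {'0x0': 900, '0x1': 100, '0x2': 300, '0x3': 700}
```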
Test readout error on qubit 1 for bell state | def test_readout_error_qubit1(self):
# Test circuit: ideal bell state
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
# Ensure qubit 0 is measured before qubit 1
circuit.barrier(qr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
backend = QasmSimulator()
# Asymmetric readout error on qubit-1 only
probs_given0 = [0.9, 0.1]
probs_given1 = [0.3, 0.7]
noise_model = NoiseModel()
noise_model.add_readout_error([probs_given0, probs_given1], [1])
shots = 2000
target = {
'0x0': probs_given0[0] * shots / 2,
'0x1': probs_given1[0] * shots / 2,
'0x2': probs_given0[1] * shots / 2,
'0x3': probs_given1[1] * shots / 2
}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_corruptedbit(self):\n self.assertRaises(ValueError, two_out_five, '1100000111') #Too many 1s must raise a ValueError!\n self.assertRaises(ValueError, two_out_five, '1100000100') #Too many 0s must raise a ValueError!",
"def isReadError(self):\n return self.f5 is 'x'",
"def test_readout_error_correlated_2qubit(self):\n # Test circuit: prepare all plus state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr)\n circuit.barrier(qr)\n # We will manually add a correlated measure operation to\n # the assembled qobj\n backend = QasmSimulator()\n\n # Correlated 2-qubit readout error\n probs_given00 = [0.3, 0, 0, 0.7]\n probs_given01 = [0, 0.6, 0.4, 0]\n probs_given10 = [0, 0, 1, 0]\n probs_given11 = [0.1, 0, 0, 0.9]\n probs_noise = [\n probs_given00, probs_given01, probs_given10, probs_given11\n ]\n noise_model = NoiseModel()\n noise_model.add_readout_error(probs_noise, [0, 1])\n\n # Expected counts\n shots = 2000\n probs_ideal = [0.25, 0.25, 0.25, 0.25]\n p00 = sum([\n ideal * noise[0] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p01 = sum([\n ideal * noise[1] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p10 = sum([\n ideal * noise[2] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p11 = sum([\n ideal * noise[3] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n # Add measure to qobj\n item = measure_instr([0, 1], [0, 1])\n append_instr(qobj, 0, item)\n # Execute\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_check_failed_highstate(self):\n self.assertEqual(self.checkredis.check_failed_highstate(\"aw1-php70-qa\", \"01\"), False)",
"def test_bit_driver_error(self):\n\n with pytest.raises(ValueError, match=r\"'b' must be either 0 or 1\"):\n qaoa.bit_driver(range(3), 2)",
"def test_5_false(self):\n\t\tself.spawn(\"./quidditch\").stdin(\"5\").stdin(\"0\").stdout(\"50\\n\").exit(0)",
"def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass",
"def test_ErrorProduce(self):\n samplefastQ=iter(self.fastQ)\n Newline=errorproducer(samplefastQ)\n self.assertNotEqual(Newline, self.fastQ)\n #uses very high phred score to make sure that errors are produced and the two datasets are not equal",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_block_bad_state(self):\n pass",
"def test_invalid_output(self):\n b1 = Block()\n self.configure_block(b1, {})\n b1.notify_signals([Signal()], \"invalid_output\")\n self.assert_num_signals_notified(1, b1, \"invalid_output\")",
"def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_errors_on_output(self):\n mb = self.maria_backup\n\n # normal run\n errors = b\"\"\"\n 220309 11:19:09 Finished backing up non-InnoDB tables and files\n 220309 11:19:09 Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...\n xtrabackup: The latest check point (for incremental): '92134324'\n xtrabackup: Stopping log copying thread..\n 220309 11:19:10 >> log scanned up to (900123121)\n 220309 11:19:10 Executing UNLOCK TABLES\n 220309 11:19:10 All tables unlocked\n 220309 11:19:10 Backup created in directory '/a/dir'\n 220309 11:19:10 [00] Writing backup-my.cnf\n 220309 11:19:10 [00] ...done\n 220309 11:19:10 [00] Writing xtrabackup_info\n 220309 11:19:10 [00] ...done\n xtrabackup: Transaction log of lsn (89423125) to (900123121) was copied.\n 220309 11:19:10 completed OK!\n \"\"\"\n self.assertFalse(mb.errors_on_output(b'', errors))\n\n # failed run\n errors = b\"\"\"\n xtrabackup: error: log block numbers mismatch:\n xtrabackup: error: expected log block no. 293842034, but got no. 13324598 from the log file.\n xtrabackup: error: it looks like InnoDB log has wrapped around before xtrabackup\n could process all records due to either log copying being too slow, or log files being too small.\n xtrabackup: Error: xtrabackup_copy_logfile() failed\n \"\"\"\n self.assertTrue(mb.errors_on_output(b'', errors))",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_missingbit(self):\n self.assertRaises(ValueError, two_out_five, '110000011')\n self.assertRaises(ValueError, two_out_five, '110000010')",
"def experiment3():\n raise FAKE_ERROR",
"def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != -1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"",
"def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))",
"def test_failed_elastic():\n test_file = os.path.join(DATA_DIR, 'failed_elastic.out')\n\n with pytest.raises(CRYSTOUT_Error) as ex:\n CRYSTOUT(test_file)\n assert 'Inadequate elastic calculation' in ex.msg",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)"
]
| [
"0.7660536",
"0.7181986",
"0.63022304",
"0.623464",
"0.6231123",
"0.61916983",
"0.61812544",
"0.613943",
"0.6105643",
"0.6102502",
"0.61010426",
"0.6059329",
"0.6034883",
"0.5977648",
"0.5974461",
"0.5962319",
"0.59377927",
"0.5909117",
"0.59089637",
"0.5904632",
"0.5895594",
"0.5880701",
"0.58621526",
"0.58477986",
"0.5796765",
"0.57770085",
"0.576398",
"0.5762053",
"0.57304895",
"0.5721682"
]
| 0.7747605 | 0 |
Test readout error on all qubits | def test_readout_error_all_qubit(self):
# Test circuit: ideal bell state
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
# Ensure qubit 0 is measured before qubit 1
circuit.barrier(qr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
backend = QasmSimulator()
# Asymmetric readout error applied to all qubits
probs_given0 = [0.9, 0.1]
probs_given1 = [0.3, 0.7]
noise_model = NoiseModel()
noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])
# Expected counts
shots = 2000
p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)
p01 = 0.5 * (probs_given0[0] * probs_given0[1] +
probs_given1[0] * probs_given1[1])
p10 = 0.5 * (probs_given0[0] * probs_given0[1] +
probs_given1[0] * probs_given1[1])
p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)
target = {
'0x0': p00 * shots,
'0x1': p01 * shots,
'0x2': p10 * shots,
'0x3': p11 * shots
}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_correlated_2qubit(self):\n # Test circuit: prepare all plus state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr)\n circuit.barrier(qr)\n # We will manually add a correlated measure operation to\n # the assembled qobj\n backend = QasmSimulator()\n\n # Correlated 2-qubit readout error\n probs_given00 = [0.3, 0, 0, 0.7]\n probs_given01 = [0, 0.6, 0.4, 0]\n probs_given10 = [0, 0, 1, 0]\n probs_given11 = [0.1, 0, 0, 0.9]\n probs_noise = [\n probs_given00, probs_given01, probs_given10, probs_given11\n ]\n noise_model = NoiseModel()\n noise_model.add_readout_error(probs_noise, [0, 1])\n\n # Expected counts\n shots = 2000\n probs_ideal = [0.25, 0.25, 0.25, 0.25]\n p00 = sum([\n ideal * noise[0] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p01 = sum([\n ideal * noise[1] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p10 = sum([\n ideal * noise[2] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n p11 = sum([\n ideal * noise[3] for ideal, noise in zip(probs_ideal, probs_noise)\n ])\n target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n # Add measure to qobj\n item = measure_instr([0, 1], [0, 1])\n append_instr(qobj, 0, item)\n # Execute\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_ErrorProduce(self):\n samplefastQ=iter(self.fastQ)\n Newline=errorproducer(samplefastQ)\n self.assertNotEqual(Newline, self.fastQ)\n #uses very high phred score to make sure that errors are produced and the two datasets are not equal",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())",
"def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass",
"def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()",
"def test_check_failed_highstate(self):\n self.assertEqual(self.checkredis.check_failed_highstate(\"aw1-php70-qa\", \"01\"), False)",
"def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs",
"def test_measure_nondeterministic_multi_qubit_without_sampling(self):\n shots = 2000\n qobj = ref_measure.measure_circuits_qobj_nondeterministic(allow_sampling=False)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_measure.measure_counts_qobj_nondeterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex",
"def test_standard_reset0_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_standard_reset1_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(0, 1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x1': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def testAllRead(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n value = filter(lambda x: x['name']==pt, pts)[0]['value']\n #assert value == reply[ptnames.index(pt)]\n received = reply[ptnames.index(pt)]\n if not value == received: \n print pt, ' was %s but should be %s'%(str(received),str(value))",
"def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0"
]
| [
"0.76637715",
"0.7603116",
"0.6704679",
"0.6683766",
"0.6649942",
"0.6611385",
"0.65605485",
"0.6504623",
"0.64533865",
"0.6441967",
"0.6404075",
"0.6287275",
"0.6265213",
"0.62271565",
"0.61801034",
"0.6169512",
"0.61167383",
"0.6037226",
"0.5986241",
"0.59798735",
"0.5923683",
"0.58994436",
"0.5817977",
"0.5809393",
"0.5779637",
"0.5778599",
"0.5775073",
"0.57624006",
"0.57543784",
"0.5742736"
]
| 0.76687586 | 0 |
Test a correlated two-qubit readout error | def test_readout_error_correlated_2qubit(self):
# Test circuit: prepare all plus state
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr)
circuit.barrier(qr)
# We will manually add a correlated measure operation to
# the assembled qobj
backend = QasmSimulator()
# Correlated 2-qubit readout error
probs_given00 = [0.3, 0, 0, 0.7]
probs_given01 = [0, 0.6, 0.4, 0]
probs_given10 = [0, 0, 1, 0]
probs_given11 = [0.1, 0, 0, 0.9]
probs_noise = [
probs_given00, probs_given01, probs_given10, probs_given11
]
noise_model = NoiseModel()
noise_model.add_readout_error(probs_noise, [0, 1])
# Expected counts
shots = 2000
probs_ideal = [0.25, 0.25, 0.25, 0.25]
p00 = sum([
ideal * noise[0] for ideal, noise in zip(probs_ideal, probs_noise)
])
p01 = sum([
ideal * noise[1] for ideal, noise in zip(probs_ideal, probs_noise)
])
p10 = sum([
ideal * noise[2] for ideal, noise in zip(probs_ideal, probs_noise)
])
p11 = sum([
ideal * noise[3] for ideal, noise in zip(probs_ideal, probs_noise)
])
target = {
'0x0': p00 * shots,
'0x1': p01 * shots,
'0x2': p10 * shots,
'0x3': p11 * shots
}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
# Add measure to qobj
item = measure_instr([0, 1], [0, 1])
append_instr(qobj, 0, item)
# Execute
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_backward_realization_value_error_caught(\n self, some_normal_rv1, some_normal_rv2\n ):\n with pytest.raises(ValueError):\n out, _ = self.transition.backward_realization(\n some_normal_rv1.mean,\n some_normal_rv2,\n t=0.0,\n )",
"def test_rb_utils(self):\n\n t1 = 100.\n t2 = 100.\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],\n [t2, t2], gate2Q)\n\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],\n [t2], gate1Q)\n\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n \"Error: 1Q Coherence Limit\")\n\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n \"Error: 2Q Coherence Limit\")\n\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],\n [0, 1, -1],\n [0.001, 0.0015, 0.02])\n\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n \"Error: 2Q EPC Calculation\")",
"def test_decompose_two_qubit_product_gate_detr_too_small(self):\n kl = np.eye(2)\n kr = 0.05 * np.eye(2)\n klkr = np.kron(kl, kr)\n with self.assertRaises(QiskitError) as exc:\n decompose_two_qubit_product_gate(klkr)\n self.assertIn(\"detR <\", exc.exception.message)",
"def test_memory(self):\n tau = 53.0\n tau0 = 22.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 100.0\n dt = 0.01\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n\n ndiv3 = self.Nsrc/3\n\n self.motor.error_fct = lambda t: np.hstack((\n np.cos(t/tau0)*np.ones(ndiv3), np.sin(t/tau0)*np.ones(ndiv3),\n np.ones(ndiv3)))\n\n M = simulation.StateMonitor(self.rule, 'out')\n\n sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)\n sim.run(tmax)\n\n # tutor output points *opposite* the motor error!\n prefactor = -self.rule.gain*tau0/(tau*tau + tau0*tau0)\n integral_part1 = np.cos(M.t/tau0)*np.exp(-M.t/tau)\n integral_part2 = np.sin(M.t/tau0)*np.exp(-M.t/tau)\n\n expected_cos = prefactor*(tau0 - tau0*integral_part1 + tau*integral_part2)\n expected_sin = prefactor*(tau - tau*integral_part1 - tau0*integral_part2)\n expected_const = -(1 - np.exp(-M.t/tau))\n\n mavg = (mrate + Mrate)*0.5\n mdiff = (Mrate - mrate)*0.5\n expected = np.vstack((\n np.tile(mavg + mdiff*expected_cos, (ndiv3, 1)),\n np.tile(mavg + mdiff*expected_sin, (ndiv3, 1)),\n np.tile(mavg + mdiff*expected_const, (ndiv3, 1))\n ))\n\n # mismatch is relatively large since we're using Euler's method\n # we can't do much better, however, since the motor controller cannot give\n # us motor error information at sub-step resolution\n mismatch = np.mean(np.abs(expected - M.out)/expected)\n self.assertLess(mismatch, 0.05)",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))",
"def test_ErrorProduce(self):\n samplefastQ=iter(self.fastQ)\n Newline=errorproducer(samplefastQ)\n self.assertNotEqual(Newline, self.fastQ)\n #uses very high phred score to make sure that errors are produced and the two datasets are not equal",
"def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)",
"def testrescorr():\n tau = np.zeros((2,50))\n tau[0,25] = 2\n tau[1,23] = 3\n tau2 = spec_utils.res_corr(tau, 2, 8)\n #Check flux conserved\n assert np.abs(np.sum(tau2[0,:])/ np.sum(tau[0,:]) -1) < 1e-6\n assert np.abs(np.sum(tau2[1,:])/ np.sum(tau[1,:]) -1) < 1e-6\n #Check expanded by expected amount\n for i in (0,1):\n assert np.size(np.where(tau2[i,:]> 0)) == 15",
"def test_non_differentiable_error(self):\r\n psi = np.array([1, 0, 1, 0]) / np.sqrt(2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.QubitStateVector(psi, wires=[0, 1])\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=[0, 1])\r\n\r\n # by default all parameters are assumed to be trainable\r\n with pytest.raises(\r\n ValueError, match=r\"Cannot differentiate with respect to parameter\\(s\\) {0}\"\r\n ):\r\n finite_diff(tape)\r\n\r\n # setting trainable parameters avoids this\r\n tape.trainable_params = {1, 2}\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n\r\n # For now, we must squeeze the results of the device execution, since\r\n # qml.probs results in a nested result. Later, we will revisit device\r\n # execution to avoid this issue.\r\n res = fn(qml.math.squeeze(dev.batch_execute(tapes)))\r\n assert res.shape == (4, 2)",
"def test_compute_correlation_paired_incompatible_samples(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary3, 'paired',\r\n 'spearman', 'high', 9, 0.22222)",
"def test_thermal_relaxation_error_t1_equal_t2_0state(self):\n error = thermal_relaxation_error(1, 1, 1)\n targets = [[{'name': 'id', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}]]\n probs = [np.exp(-1), 1 - np.exp(-1)]\n for j in range(2):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, targets)\n if circ[0]['name'] == 'id':\n self.assertAlmostEqual(p, probs[0], msg=\"identity probability\")\n else:\n self.assertAlmostEqual(p, probs[1], msg=\"reset probability\")\n self.assertEqual(targets, [], msg=\"relaxation circuits\")",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_vncdr(backend, nqubits, noise, full_output, insertion_gate, readout):\n if backend.name == \"tensorflow\":\n import tensorflow as tf\n\n tf.config.threading.set_inter_op_parallelism_threads = 1\n tf.config.threading.set_intra_op_parallelism_threads = 1\n else:\n backend.set_threads(1)\n # Define the circuit\n c = get_circuit(nqubits)\n # Define the observable\n obs = np.prod([Z(i) for i in range(nqubits)])\n obs = SymbolicHamiltonian(obs, backend=backend)\n # Noise-free expected value\n exact = obs.expectation(backend.execute_circuit(c).state())\n # Noisy expected value without mitigation\n if \"calibration_matrix\" in readout.keys() or \"ncircuits\" in readout.keys():\n if nqubits == 1:\n p = cal_matrix_1q\n elif nqubits == 3:\n p = cal_matrix_3q\n # noise.add(ReadoutError(probabilities=p),gate=gates.M)\n state = backend.execute_circuit(noise.apply(c), nshots=10000)\n noisy = state.expectation_from_samples(obs)\n # Mitigated expected value\n estimate = vnCDR(\n circuit=c,\n observable=obs,\n backend=backend,\n noise_levels=range(3),\n noise_model=noise,\n nshots=10000,\n n_training_samples=20,\n insertion_gate=insertion_gate,\n full_output=full_output,\n readout=readout,\n )\n if full_output:\n estimate = estimate[0]\n assert np.abs(exact - estimate) <= np.abs(exact - noisy)",
"def test_thermal_relaxation_error_t1_t2_inf_ideal(self):\n error = thermal_relaxation_error(np.inf, np.inf, 0)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]},\n msg=\"ideal circuit\")",
"def test_depolarizing_error_2q_unitary(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 2, standard_gates=False)\n X = standard_gate_unitary('x')\n Y = standard_gate_unitary('y')\n Z = standard_gate_unitary('z')\n target_unitaries = [X, Y, Z, # on qubit 0\n X, Y, Z, # on qubit 1\n np.kron(X, X), np.kron(X, Y), np.kron(X, Z),\n np.kron(Y, X), np.kron(Y, Y), np.kron(Y, Z),\n np.kron(Z, X), np.kron(Z, Y), np.kron(Z, Z)]\n for j in range(16):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', \"id\"))\n if name == \"unitary\":\n self.assertAlmostEqual(p, p_depol / 16)\n op = circ[0]['params'][0]\n qubits = circ[0]['qubits']\n if len(op) == 2:\n self.assertIn(qubits, [[0], [1]])\n else:\n self.assertEqual(qubits, [0, 1])\n self.remove_if_found(op, target_unitaries)\n else:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 16)\n self.assertEqual(circ[0]['qubits'], [0])\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")",
"def test_second_q_ops_without_transformers(self):\n expected_num_of_sec_quant_ops = 5\n logfile = self.get_resource_path(\n \"CO2_freq_B3LYP_ccpVDZ.log\",\n \"problems/second_quantization/vibrational/resources\",\n )\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n driver = GaussianForcesDriver(logfile=logfile)\n watson_hamiltonian = driver.run()\n\n num_modals = 2\n truncation_order = 3\n num_modes = watson_hamiltonian.num_modes\n num_modals = [num_modals] * num_modes\n vibrational_problem = VibrationalStructureProblem(driver, num_modals, truncation_order)\n second_quantized_ops = vibrational_problem.second_q_ops()\n vibrational_op = second_quantized_ops[0]\n\n with self.subTest(\"Check that the correct properties are/aren't None\"):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n # new driver used, molecule_data* should be None\n self.assertIsNone(vibrational_problem.molecule_data)\n self.assertIsNone(vibrational_problem.molecule_data_transformed)\n # converted properties should never be None\n self.assertIsNotNone(vibrational_problem.grouped_property)\n self.assertIsNotNone(vibrational_problem.grouped_property_transformed)\n\n with self.subTest(\"Check expected length of the list of second quantized operators.\"):\n assert len(second_quantized_ops) == expected_num_of_sec_quant_ops\n with self.subTest(\"Check types in the list of second quantized operators.\"):\n assert isinstance(vibrational_op, VibrationalOp)\n # TODO: add more checks once the algorithms are fully in place",
"def test_thermal_relaxation_error_gate(self):\n t1, t2, time, p1 = (2, 1, 1, 0.3)\n error = thermal_relaxation_error(t1, t2, time, p1)\n targets = [[{'name': 'id', 'qubits': [0]}],\n [{'name': 'z', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}, {'name': 'x', 'qubits': [0]}]]\n p_reset0 = (1 - p1) * (1 - np.exp(-1 / t1))\n p_reset1 = p1 * (1 - np.exp(-1 / t1))\n p_z = 0.5 * np.exp(-1 / t1) * (1 - np.exp(-(1 / t2 - 1 / t1) * time))\n p_id = 1 - p_z - p_reset0 - p_reset1\n for j in range(4):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, targets)\n name = circ[0]['name']\n if circ[0]['name'] == 'id':\n self.assertAlmostEqual(p, p_id, msg=\"identity probability\")\n elif name == 'z':\n self.assertAlmostEqual(p, p_z, msg=\"Z error probability\")\n elif len(circ) == 1:\n self.assertAlmostEqual(p, p_reset0, msg=\"reset-0 probability\")\n else:\n self.assertAlmostEqual(p, p_reset1, msg=\"reset-1 probability\")\n self.assertEqual(targets, [], msg=\"relaxation circuits\")",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_motor_error(self):\n # reproducible arbitrariness\n np.random.seed(12325)\n\n nsteps = 10\n nchan = 3\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class MotorErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.N))\n\n def evolve(self, t, dt):\n i = int_r(t/dt)\n self.motor_error[i, :] = self.target.get_motor_error()\n\n M = MotorErrorGrabber(controller)\n M1 = simulation.StateMonitor(controller, 'out')\n\n sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)\n sim.run(tmax)\n\n for i in xrange(int_r(tmax/self.dt)):\n diff = M1.out[:, i] - target[:, i]\n self.assertTrue(np.allclose(M.motor_error[i], diff))",
"def test_compute_correlation_invalid_comparison_mode(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary2, 'foo',\r\n 'pearson', 'two-sided', 999, 0.90)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_spectrum_chi_squared_with_unequal_axis_throws_exception(host):\n host2 = host.copy()\n host2.wav_select(host2.xaxis[50], host2.xaxis[-50])\n\n with pytest.raises(Exception):\n spectrum_chisqr(host, host2)",
"def test_depolarizing_error_2q_gate(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 2, standard_gates=True)\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n for j in range(16):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, target_circs)\n if circ == [{\"name\": \"id\", \"qubits\": [0]}]:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 16,\n msg=\"Incorrect identity probability\")\n else:\n self.assertAlmostEqual(p, p_depol / 16, msg=\"Incorrect Pauli probability\")\n self.assertEqual(target_circs, [], msg=\"Incorrect unitaries\")",
"def test_thermal_relaxation_error_t1_equal_t2_1state(self):\n error = thermal_relaxation_error(1, 1, 1, 1)\n targets = [[{'name': 'id', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}, {'name': 'x', 'qubits': [0]}]]\n probs = [np.exp(-1), 1 - np.exp(-1)]\n for j in range(2):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, targets)\n if circ[0]['name'] == 'id':\n self.assertAlmostEqual(p, probs[0], msg=\"identity probability\")\n else:\n self.assertAlmostEqual(p, probs[1], msg=\"reset probability\")\n self.assertEqual(targets, [], msg=\"relaxation circuits\")",
"def test_thermal_relaxation_error_raises_invalid_t2(self):\n # T2 == 0\n self.assertRaises(NoiseError, lambda: thermal_relaxation_error(1, 0, 0))\n # T2 < 0\n self.assertRaises(NoiseError, lambda: thermal_relaxation_error(1, -1, 0))",
"def test_decompose_two_qubit_product_gate_not_product(self):\n klkr = Ud(1.0e-6, 0, 0)\n with self.assertRaises(QiskitError) as exc:\n decompose_two_qubit_product_gate(klkr)\n self.assertIn(\"decomposition failed\", exc.exception.message)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)"
]
| [
"0.72487193",
"0.7131155",
"0.67280173",
"0.6090974",
"0.5945384",
"0.5846784",
"0.58361",
"0.5827254",
"0.57993144",
"0.57961226",
"0.57810736",
"0.5767251",
"0.57519376",
"0.5744709",
"0.5707226",
"0.5689585",
"0.5668584",
"0.5661487",
"0.5660232",
"0.56589425",
"0.5656539",
"0.5655932",
"0.5654211",
"0.56506395",
"0.5648322",
"0.563653",
"0.56361896",
"0.5631517",
"0.5602416",
"0.5586948"
]
| 0.78623223 | 0 |
Test 50% reset error on qubit-0 | def test_reset_error_specific_qubit_50percent(self):
# Test circuit: ideal outcome "11"
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.x(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
noise_circs = [[{
"name": "reset",
"qubits": [0]
}], [{
"name": "id",
"qubits": [0]
}]]
# 50% reset noise on qubit-0 "u3" only.
noise_probs = [0.5, 0.5]
error = QuantumError(zip(noise_circs, noise_probs))
noise_model = NoiseModel()
noise_model.add_quantum_error(error, "u3", [0])
shots = 2000
target = {'0x2': shots / 2, '0x3': shots / 2}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0reset1_error_50percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = reset_error(0.25, 0.25)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {\n '0x0': 3 * shots / 16,\n '0x1': shots / 16,\n '0x2': 9 * shots / 16,\n '0x3': 3 * shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_standard_reset1_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(0, 1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x1': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '0% [....................]')",
"def reset_gate_error_circuits():\n circuits = []\n\n # 50% reset to 0 state on qubit 0\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset to 0 state on qubit 1\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 0 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 1 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset error to 0 and 1 on all qubits\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n return circuits",
"def experiment3():\n raise FAKE_ERROR",
"def testReset(self):\n \n clk = Signal(0)\n rst = Signal(1)\n clock_gen = ClkDriver(clk, period=4)\n \n out = Signal(intbv(0)[4:])\n counter = Counter(out, clk, rst)\n \n def test():\n for i in range(200):\n # count up to 9 then reset\n if int(out) == 9:\n rst.next = 0\n yield delay(1)\n self.assertEqual(int(out), 0)\n # turn off reset next time\n else:\n rst.next = 1\n yield delay(1)\n \n check = test()\n sim = Simulation(counter, clock_gen, check)\n sim.run(400, quiet=1)",
"def test_reset_failure(self):\r\n problem_url_name = 'H1P1'\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('courseware.models.StudentModule.save') as mock_save:\r\n mock_save.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.reset_problem_attempts('instructor', location)\r\n self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)",
"def test_issue_reset_time(self):\n pass",
"def test_reset_nondeterministic(self):\n # For statevector output we can combine deterministic and non-deterministic\n # count output circuits\n shots = 2000\n circuits = ref_reset.reset_circuits_nondeterministic(final_measure=True)\n targets = ref_reset.reset_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def on_reset(qutest):\n\n qutest.expect_pause()\n qutest.glb_filter(FILTER.SM)\n qutest.loc_filter(QS_OBJ_KIND.SM_AO, 'AO_Philo<2>')\n qutest.Continue() # note continue in lower case. is a reserved word in python\n qutest.expect(\"===RTC===> St-Init Obj=AO_Philo<2>,State=QP::QHsm::top->thinking\")\n qutest.expect(\"===RTC===> St-Entry Obj=AO_Philo<2>,State=thinking\")\n qutest.expect(\"%timestamp Init===> Obj=AO_Philo<2>,State=thinking\")\n qutest.glb_filter(FILTER.SM, FILTER.AO, FILTER.UA)\n qutest.current_obj(QS_OBJ_KIND.SM_AO, 'AO_Philo<2>')",
"def test_reset(self):\n p1 = self.player()\n p1.reset()\n self.assertEqual(p1.history, [])\n self.assertEqual(p1.genome[0], C)",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))"
]
| [
"0.8051521",
"0.79759365",
"0.79388624",
"0.7862399",
"0.7766173",
"0.77096605",
"0.7605542",
"0.75477517",
"0.67614543",
"0.6734318",
"0.6728533",
"0.6721009",
"0.6587347",
"0.65431523",
"0.6492219",
"0.64849436",
"0.6459176",
"0.6434604",
"0.63961065",
"0.63679075",
"0.6339163",
"0.63234305",
"0.6295249",
"0.6262204",
"0.62491584",
"0.62126607",
"0.61778486",
"0.61756754",
"0.6100864",
"0.6077279"
]
| 0.80340695 | 1 |
Test 25% reset error on qubit-1 | def test_reset_error_specific_qubit_25percent(self):
    # Test circuit: ideal outcome "11"
    qr = QuantumRegister(2, 'qr')
    cr = ClassicalRegister(2, 'cr')
    circuit = QuantumCircuit(qr, cr)
    circuit.x(qr)
    circuit.measure(qr, cr)
    backend = QasmSimulator()
    noise_circs = [[{
        "name": "reset",
        "qubits": [0]
    }], [{
        "name": "id",
        "qubits": [0]
    }]]
    # 25% reset noise on qubit-1 "u3" only.
    noise_probs = [0.25, 0.75]
    error = QuantumError(zip(noise_circs, noise_probs))
    noise_model = NoiseModel()
    noise_model.add_quantum_error(error, "u3", [1])
    shots = 2000
    # target = {'01': shots / 4, '11': 3 * shots / 4}
    target = {'0x1': shots / 4, '0x3': 3 * shots / 4}
    circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
    qobj = assemble([circuit], backend, shots=shots)
    result = backend.run(qobj, noise_model=noise_model).result()
    self.is_completed(result)
    self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset1_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(0, 1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x1': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_standard_reset0reset1_error_50percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = reset_error(0.25, 0.25)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {\n '0x0': 3 * shots / 16,\n '0x1': shots / 16,\n '0x2': 9 * shots / 16,\n '0x3': 3 * shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '0% [....................]')",
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_failure(self):\r\n problem_url_name = 'H1P1'\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('courseware.models.StudentModule.save') as mock_save:\r\n mock_save.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.reset_problem_attempts('instructor', location)\r\n self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)",
"def reset_gate_error_circuits():\n circuits = []\n\n # 50% reset to 0 state on qubit 0\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset to 0 state on qubit 1\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 0 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 1 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset error to 0 and 1 on all qubits\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n return circuits",
"def test_issue_reset_time(self):\n pass",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def test_foo(self):\n self.ran = True\n 1 / 0",
"def testReset(self):\n \n clk = Signal(0)\n rst = Signal(1)\n clock_gen = ClkDriver(clk, period=4)\n \n out = Signal(intbv(0)[4:])\n counter = Counter(out, clk, rst)\n \n def test():\n for i in range(200):\n # count up to 9 then reset\n if int(out) == 9:\n rst.next = 0\n yield delay(1)\n self.assertEqual(int(out), 0)\n # turn off reset next time\n else:\n rst.next = 1\n yield delay(1)\n \n check = test()\n sim = Simulation(counter, clock_gen, check)\n sim.run(400, quiet=1)",
"def experiment3():\n raise FAKE_ERROR",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def test_submission_reset(self):\r\n self.basic_setup(reset=True)\r\n resp = self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n # submit a second time to draw NotFoundError\r\n resp = self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.assertEqual(resp.status_code, 200)\r\n err_msg = (\r\n \"The state of this problem has changed since you loaded this page. \"\r\n \"Please refresh your page.\"\r\n )\r\n self.assertEqual(json.loads(resp.content).get(\"success\"), err_msg)"
]
| [
"0.8192283",
"0.8074021",
"0.8054405",
"0.79512155",
"0.7703991",
"0.76622504",
"0.7568427",
"0.7505362",
"0.7011177",
"0.6954605",
"0.69186103",
"0.6909316",
"0.6887726",
"0.68811786",
"0.68578637",
"0.6784696",
"0.64939183",
"0.6487063",
"0.6195229",
"0.6170042",
"0.61048234",
"0.5993075",
"0.5885541",
"0.5820936",
"0.58118767",
"0.5798545",
"0.5791219",
"0.57688266",
"0.5765495",
"0.5750002"
]
| 0.8253222 | 0 |
Test 100% reset error on all qubits | def test_reset_error_all_qubit_100percent(self):
    # Test circuit: ideal outcome "11"
    qr = QuantumRegister(2, 'qr')
    cr = ClassicalRegister(2, 'cr')
    circuit = QuantumCircuit(qr, cr)
    circuit.x(qr)
    circuit.measure(qr, cr)
    backend = QasmSimulator()
    noise_circs = [[{
        "name": "reset",
        "qubits": [0]
    }], [{
        "name": "id",
        "qubits": [0]
    }]]
    # 100% reset noise on all qubit "u3".
    noise_probs = [1, 0]
    error = QuantumError(zip(noise_circs, noise_probs))
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(error, "u3")
    shots = 100
    # target = {'00': shots}
    target = {'0x0': shots}
    circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
    qobj = assemble([circuit], backend, shots=shots)
    result = backend.run(qobj, noise_model=noise_model).result()
    self.is_completed(result)
    self.compare_counts(result, [circuit], [target], delta=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset1_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(0, 1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x1': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_standard_reset0_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0reset1_error_50percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = reset_error(0.25, 0.25)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {\n '0x0': 3 * shots / 16,\n '0x1': shots / 16,\n '0x2': 9 * shots / 16,\n '0x3': 3 * shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def reset_gate_error_circuits():\n circuits = []\n\n # 50% reset to 0 state on qubit 0\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset to 0 state on qubit 1\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 0 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 100% reset error to 1 on all qubits\n qr = QuantumRegister(1, \"qr\")\n cr = ClassicalRegister(1, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n # 25% reset error to 0 and 1 on all qubits\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n circuit = QuantumCircuit(qr, cr)\n circuit.i(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n circuits.append(circuit)\n\n return circuits",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_reset_nondeterministic(self):\n # For statevector output we can combine deterministic and non-deterministic\n # count output circuits\n shots = 2000\n circuits = ref_reset.reset_circuits_nondeterministic(final_measure=True)\n targets = ref_reset.reset_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset(self):\n p1 = self.player()\n p1.reset()\n self.assertEqual(p1.history, [])\n self.assertEqual(p1.genome[0], C)",
"def test_issue_reset_time(self):\n pass",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_error_num_qubits(self, basis_state, wires):\n\n with pytest.raises(ValueError, match=\"'basis_state' must be of shape\"):\n BasisStatePreparation(basis_state, wires)",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_failure(self):\r\n problem_url_name = 'H1P1'\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('courseware.models.StudentModule.save') as mock_save:\r\n mock_save.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.reset_problem_attempts('instructor', location)\r\n self._assert_task_failure(instructor_task.id, 'reset_problem_attempts', problem_url_name, expected_message)",
"def test_submission_reset(self):\r\n self.basic_setup(reset=True)\r\n resp = self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n # submit a second time to draw NotFoundError\r\n resp = self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.assertEqual(resp.status_code, 200)\r\n err_msg = (\r\n \"The state of this problem has changed since you loaded this page. \"\r\n \"Please refresh your page.\"\r\n )\r\n self.assertEqual(json.loads(resp.content).get(\"success\"), err_msg)",
"def test_reset(self, scml_system):\n scml_system._t = 12\n scml_system._k = 33\n state_space = scml_system.state_space\n state_positions = scml_system.state_positions\n initial_state = scml_system.reset()\n target = np.array([0, 0, 0, 0, 0, 0, 560]) / scml_system.limits\n assert np.all(initial_state == target), 'Initial states of the system are incorrect'\n assert scml_system._t == 0, 'Time of the system was not set to zero after reset'\n assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'\n assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \\\n == scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\\\n 'The reset was not passed to all components of the SCMLSystem'\n assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'\n assert all(scml_system._ode_solver.y == np.zeros_like(\n scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float\n )), ' The ode solver was not reset correctly'",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')",
"def on_reset(qutest):\n\n qutest.expect_pause()\n qutest.glb_filter(FILTER.SM)\n qutest.loc_filter(QS_OBJ_KIND.SM_AO, 'AO_Philo<2>')\n qutest.Continue() # note continue in lower case. is a reserved word in python\n qutest.expect(\"===RTC===> St-Init Obj=AO_Philo<2>,State=QP::QHsm::top->thinking\")\n qutest.expect(\"===RTC===> St-Entry Obj=AO_Philo<2>,State=thinking\")\n qutest.expect(\"%timestamp Init===> Obj=AO_Philo<2>,State=thinking\")\n qutest.glb_filter(FILTER.SM, FILTER.AO, FILTER.UA)\n qutest.current_obj(QS_OBJ_KIND.SM_AO, 'AO_Philo<2>')",
"def testReset(self):\n \n clk = Signal(0)\n rst = Signal(1)\n clock_gen = ClkDriver(clk, period=4)\n \n out = Signal(intbv(0)[4:])\n counter = Counter(out, clk, rst)\n \n def test():\n for i in range(200):\n # count up to 9 then reset\n if int(out) == 9:\n rst.next = 0\n yield delay(1)\n self.assertEqual(int(out), 0)\n # turn off reset next time\n else:\n rst.next = 1\n yield delay(1)\n \n check = test()\n sim = Simulation(counter, clock_gen, check)\n sim.run(400, quiet=1)",
"def test_successReset(self):\n for i in range(3):\n self.circuit_breaker.failure()\n self.circuit_breaker.success()\n available0 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available1 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available2 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available3 = self.circuit_breaker.available()\n available4 = self.circuit_breaker.available()\n self.assertEqual((available0, available1, available2, available3, available4),\n (True, True, True, False, False))",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_reset_computer(self):\n computer1 = computer.Computer(1)\n computer1.reset_computer()\n res = computer1.greediness == 7 and computer1.rolls == 0\n self.assertTrue(res)",
"def test_reset_deterministic(self):\n # For statevector output we can combine deterministic and non-deterministic\n # count output circuits\n shots = 100\n circuits = ref_reset.reset_circuits_deterministic(final_measure=True)\n targets = ref_reset.reset_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)"
]
| [
"0.7634604",
"0.7615573",
"0.7602229",
"0.75521535",
"0.7240932",
"0.7234621",
"0.7206218",
"0.7155868",
"0.65695375",
"0.64620274",
"0.6300707",
"0.6275485",
"0.62622374",
"0.62479293",
"0.6206906",
"0.6189297",
"0.6177947",
"0.6099705",
"0.60909253",
"0.6077174",
"0.6065224",
"0.6050937",
"0.6047965",
"0.5992931",
"0.5969198",
"0.5967038",
"0.5961121",
"0.59585094",
"0.5957442",
"0.59421194"
]
| 0.7933952 | 0 |
Test 100% Pauli error on id gates | def test_all_qubit_pauli_error_gate_100percent(self):
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.iden(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 100
# test noise model
error = pauli_error([('X', 1)])
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(error, 'id')
# Execute
target = {'0x3': shots}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_1q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))",
"def test_depolarizing_error_1q_gate(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 1, standard_gates=True)\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n for j in range(4):\n circ, p = error.error_term(j)\n self.assertEqual(circ[0]['qubits'], [0])\n if circ[0]['name'] == \"id\":\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 4,\n msg=\"Incorrect identity probability\")\n else:\n self.assertAlmostEqual(p, p_depol / 4, msg=\"Incorrect Pauli probability\")\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_circs, [], msg=\"Incorrect unitaries\")",
"def test_pauli_error_1q_gate_from_string(self):\n paulis = ['I', 'X', 'Y', 'Z']\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_gpus_raises():\n gpus = \"1\"\n\n with pytest.raises(ValueError):\n cli._gpus(gpus)",
"def test_non_integral_validation(self):",
"def test_non_integral_validation(self):",
"def experiment3():\n raise FAKE_ERROR",
"def test_depolarizing_error_identity_unitary(self):\n # 1 qubit\n error = depolarizing_error(0, 1, standard_gates=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")\n # 2-qubit\n error = depolarizing_error(0, 2, standard_gates=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")",
"def test_failure(database):\n approp = AppropriationFactory(status_of_budgetary_resour_cpe=Decimal(101.23),\n total_budgetary_resources_cpe=Decimal(102.34))\n\n errors = number_of_errors(_FILE, database, models=[approp])\n assert errors == 1",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_G_2_by_2_bad_data(self):\r\n self.assertRaises(ValueError, G_2_by_2, 1, -1, 1, 1)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_invalid_gaitid_idaa_program(self):\n idaa_index = 3\n expected_discrepancies = 1\n\n idaa_program = self.idaa_json['value'][idaa_index]['fields']\n\n idaa_program['GaitIDs'][0] = {'LookupValue': '1237a'}\n\n upload_program = program.ProgramUpload(idaa_program=idaa_program,\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)\n self.assertTrue(upload_program.has_discrepancy('gaitid'))",
"def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))",
"def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_1q_unitary_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=False)\n\n target_unitaries = [standard_gate_unitary('x'),\n standard_gate_unitary('y'),\n standard_gate_unitary('z')]\n target_probs = probs.copy()\n target_identity_count = 0\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', 'id'))\n self.assertEqual(circ[0]['qubits'], [0])\n self.remove_if_found(p, target_probs)\n if name == \"unitary\":\n self.remove_if_found(circ[0]['params'][0], target_unitaries)\n else:\n target_identity_count += 1\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")\n self.assertEqual(target_identity_count, 1, msg=\"Incorrect identities\")",
"def test_cadastros_de_registros_no_site_rpa_challenge():",
"def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))",
"def test_pauli_error_2q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['XZ', 'YX', 'ZY']]\n probs = [0.5, 0.3, 0.2]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_failure(database):\n\n op = ObjectClassProgramActivityFactory(gross_outlay_amount_by_pro_fyb=1, gross_outlays_undelivered_fyb=1,\n gross_outlays_delivered_or_fyb=1)\n\n assert number_of_errors(_FILE, database, models=[op]) == 1",
"def test_get_counturingErr(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, COUNTURING_ERR_IDX, COUNTURING_ERR_SUB)\n param_obj = self.__dict__[servo_type]._get_counturingErr()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in counturingErr...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def test_missing_gaitid_idaa_program(self):\n idaa_index = 3\n expected_discrepancies = 1\n\n idaa_program = self.idaa_json['value'][idaa_index]['fields']\n\n idaa_program['GaitIDs'] = []\n\n upload_program = program.ProgramUpload(idaa_program=idaa_program,\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertEquals(upload_program.discrepancy_count, expected_discrepancies)\n self.assertTrue(upload_program.has_discrepancy('gaitid'))",
"def test_depolarizing_error_ideal(self):\n # 1 qubit\n error = depolarizing_error(0, 1, standard_gates=True)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")\n # 2-qubit\n error = depolarizing_error(0, 2, standard_gates=True)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")"
]
| [
"0.6823113",
"0.65089923",
"0.63360244",
"0.62340903",
"0.620764",
"0.6184912",
"0.6162606",
"0.60578805",
"0.59943485",
"0.5974759",
"0.5974759",
"0.59589094",
"0.59528273",
"0.5899597",
"0.5862406",
"0.5817934",
"0.579591",
"0.576339",
"0.5763351",
"0.57591534",
"0.5753654",
"0.5752284",
"0.57515687",
"0.5748975",
"0.573403",
"0.57269865",
"0.57221055",
"0.5700973",
"0.5672098",
"0.56676674"
]
| 0.6648989 | 1 |
Test 100% Pauli error on id gates on qubit1 | def test_specific_qubit_pauli_error_gate_100percent(self):
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.iden(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 100
# test noise model
error = pauli_error([('X', 1)])
noise_model = NoiseModel()
noise_model.add_quantum_error(error, 'id', [1])
# Execute
target = {'0x2': shots}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_1q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_1q_gate_from_string(self):\n paulis = ['I', 'X', 'Y', 'Z']\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_depolarizing_error_1q_gate(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 1, standard_gates=True)\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n for j in range(4):\n circ, p = error.error_term(j)\n self.assertEqual(circ[0]['qubits'], [0])\n if circ[0]['name'] == \"id\":\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 4,\n msg=\"Incorrect identity probability\")\n else:\n self.assertAlmostEqual(p, p_depol / 4, msg=\"Incorrect Pauli probability\")\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_circs, [], msg=\"Incorrect unitaries\")",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_quality_gt_one(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(2.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_2q_gate_from_string_1qonly(self):\n paulis = ['XI', 'YI', 'ZI']\n probs = [0.5, 0.3, 0.2]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [1]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def experiment3():\n raise FAKE_ERROR",
"def test_gpus_raises():\n gpus = \"1\"\n\n with pytest.raises(ValueError):\n cli._gpus(gpus)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_2q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['XZ', 'YX', 'ZY']]\n probs = [0.5, 0.3, 0.2]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_pauli_error_1q_unitary_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=False)\n\n target_unitaries = [standard_gate_unitary('x'),\n standard_gate_unitary('y'),\n standard_gate_unitary('z')]\n target_probs = probs.copy()\n target_identity_count = 0\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', 'id'))\n self.assertEqual(circ[0]['qubits'], [0])\n self.remove_if_found(p, target_probs)\n if name == \"unitary\":\n self.remove_if_found(circ[0]['params'][0], target_unitaries)\n else:\n target_identity_count += 1\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")\n self.assertEqual(target_identity_count, 1, msg=\"Incorrect identities\")",
"def test_quality_lt_zero(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(-1.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))",
"def test_math_domain_error(self):\n self.qp.load_qasm_file(self._get_resource_path('qasm/math_domain_error.qasm'), name='test')\n coupling_map = {0: [2], 1: [2], 2: [3], 3: []}\n result1 = self.qp.execute([\"test\"], backend=\"local_qasm_simulator\", coupling_map=coupling_map, seed=self.seed)\n self.assertEqual(result1.get_counts(\"test\"), {'0001': 507, '0101': 517})",
"def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()",
"def test_depolarizing_error_identity_unitary(self):\n # 1 qubit\n error = depolarizing_error(0, 1, standard_gates=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")\n # 2-qubit\n error = depolarizing_error(0, 2, standard_gates=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]}, msg=\"ideal circuit\")",
"def test_depolarizing_error_2q_gate(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 2, standard_gates=True)\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n for j in range(16):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, target_circs)\n if circ == [{\"name\": \"id\", \"qubits\": [0]}]:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 16,\n msg=\"Incorrect identity probability\")\n else:\n self.assertAlmostEqual(p, p_depol / 16, msg=\"Incorrect Pauli probability\")\n self.assertEqual(target_circs, [], msg=\"Incorrect unitaries\")",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))"
]
| [
"0.6948121",
"0.6896298",
"0.67069465",
"0.6486029",
"0.64153993",
"0.63180083",
"0.631731",
"0.6288314",
"0.62688136",
"0.6246572",
"0.62225777",
"0.6220967",
"0.62184393",
"0.61696744",
"0.6145834",
"0.606778",
"0.60577255",
"0.6056523",
"0.59679675",
"0.5959239",
"0.59498006",
"0.5945161",
"0.5880977",
"0.58570087",
"0.58548415",
"0.58488953",
"0.5847445",
"0.5835604",
"0.58176756",
"0.5815293"
]
| 0.7067444 | 0 |
Test 25% Pauli error on id gates qubit0 | def test_specific_qubit_pauli_error_gate_25percent(self):
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.iden(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 2000
# test noise model
error = pauli_error([('X', 0.25), ('I', 0.75)])
noise_model = NoiseModel()
noise_model.add_quantum_error(error, 'id', [0])
# Execute
target = {'0x0': 3 * shots / 4, '0x1': shots / 4}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def experiment3():\n raise FAKE_ERROR",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))",
"def test_pauli_error_1q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_gpus_raises():\n gpus = \"1\"\n\n with pytest.raises(ValueError):\n cli._gpus(gpus)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_quality_gt_one(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(2.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))",
"def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))",
"def test_pauli_error_1q_gate_from_string(self):\n paulis = ['I', 'X', 'Y', 'Z']\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"id\", \"qubits\": [0]}],\n [{\"name\": \"x\", \"qubits\": [0]}],\n [{\"name\": \"y\", \"qubits\": [0]}],\n [{\"name\": \"z\", \"qubits\": [0]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_quality_lt_zero(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(-1.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))",
"def test_error_if_not_expval(self):\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n @qml.qnode(dev)\n def circuit():\n return qml.var(qml.PauliZ(0))\n\n with pytest.raises(NotImplementedError):\n circuit()",
"def work3():\n logging.info(\"work3 doing a job\")\n if random.randint(1, 5) == 1:\n logging.error(\"Error in work3: bad input\")",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_2q_gate_from_string_1qonly(self):\n paulis = ['XI', 'YI', 'ZI']\n probs = [0.5, 0.3, 0.2]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"z\", \"qubits\": [1]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_pauli_error_2q_gate_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['XZ', 'YX', 'ZY']]\n probs = [0.5, 0.3, 0.2]\n error = pauli_error(zip(paulis, probs), standard_gates=True)\n\n target_circs = [[{\"name\": \"z\", \"qubits\": [0]}, {\"name\": \"x\", \"qubits\": [1]}],\n [{\"name\": \"x\", \"qubits\": [0]}, {\"name\": \"y\", \"qubits\": [1]}],\n [{\"name\": \"y\", \"qubits\": [0]}, {\"name\": \"z\", \"qubits\": [1]}]]\n target_probs = probs.copy()\n\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n self.remove_if_found(p, target_probs)\n self.remove_if_found(circ, target_circs)\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_circs, [], msg=\"Incorrect circuits\")",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_pauli_error_1q_unitary_from_pauli(self):\n paulis = [Pauli.from_label(s) for s in ['I', 'X', 'Y', 'Z']]\n probs = [0.4, 0.3, 0.2, 0.1]\n error = pauli_error(zip(paulis, probs), standard_gates=False)\n\n target_unitaries = [standard_gate_unitary('x'),\n standard_gate_unitary('y'),\n standard_gate_unitary('z')]\n target_probs = probs.copy()\n target_identity_count = 0\n for j in range(len(paulis)):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', 'id'))\n self.assertEqual(circ[0]['qubits'], [0])\n self.remove_if_found(p, target_probs)\n if name == \"unitary\":\n self.remove_if_found(circ[0]['params'][0], target_unitaries)\n else:\n target_identity_count += 1\n self.assertEqual(target_probs, [], msg=\"Incorrect probabilities\")\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")\n self.assertEqual(target_identity_count, 1, msg=\"Incorrect identities\")",
"def test_kraus_error(self):\n A0 = [[1, 0], [0, np.sqrt(1 - 0.3)]]\n A1 = [[0, 0], [0, np.sqrt(0.3)]]\n targets = [A0, A1]\n error = kraus_error(targets)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n kraus = circ[0]\n self.assertEqual(kraus['name'], 'kraus')\n self.assertEqual(kraus['qubits'], [0])\n for op in kraus['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus QuantumError\")",
"def test_is_prime_invalid(self):\n sol = solution.Solution();\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n #self.assertFalse(sol.isPrime(864))",
"def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)"
]
| [
"0.714219",
"0.70144165",
"0.68234855",
"0.67411816",
"0.6593448",
"0.6544619",
"0.6453381",
"0.6432425",
"0.64307654",
"0.64216",
"0.64054894",
"0.63167316",
"0.6282768",
"0.6226604",
"0.6193651",
"0.61926645",
"0.6139788",
"0.60511786",
"0.6022499",
"0.6021682",
"0.60182846",
"0.60126215",
"0.60025287",
"0.5995731",
"0.59954846",
"0.5975086",
"0.596227",
"0.59588265",
"0.5955381",
"0.5946894"
]
| 0.7020256 | 1 |
Test 100% nonlocal Pauli error on cx(0, 1) gate | def test_nonlocal_pauli_error_gate_25percent(self):
qr = QuantumRegister(3, 'qr')
cr = ClassicalRegister(3, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.cx(qr[0], qr[1])
circuit.barrier(qr)
circuit.cx(qr[1], qr[0])
circuit.barrier(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 2000
# test noise model
error = pauli_error([('XII', 0.25), ('III', 0.75)])
noise_model = NoiseModel()
noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])
# Execute
target = {'0x0': 3 * shots / 4, '0x4': shots / 4}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_controlled_by_error():\n c = Circuit(3)\n c.add(gates.H(0))\n c.add(gates.Y(1).controlled_by(0, 2))\n with pytest.raises(ValueError):\n c.to_qasm()",
"def test_aux_ops_raises(self):\n\n problem = TimeEvolutionProblem(\n self.hamiltonian, time=0.02, aux_operators=[self.hamiltonian, self.observable]\n )\n\n sampler = Sampler()\n fidelity_primitive = ComputeUncompute(sampler)\n\n pvqd = PVQD(\n fidelity_primitive,\n self.ansatz,\n self.initial_parameters,\n optimizer=SPSA(maxiter=0, learning_rate=0.1, perturbation=0.01),\n )\n\n with self.assertRaises(ValueError):\n _ = pvqd.evolve(problem)",
"def test_non_differentiable_error(self):\r\n psi = np.array([1, 0, 1, 0]) / np.sqrt(2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.QubitStateVector(psi, wires=[0, 1])\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=[0, 1])\r\n\r\n # by default all parameters are assumed to be trainable\r\n with pytest.raises(\r\n ValueError, match=r\"Cannot differentiate with respect to parameter\\(s\\) {0}\"\r\n ):\r\n finite_diff(tape)\r\n\r\n # setting trainable parameters avoids this\r\n tape.trainable_params = {1, 2}\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n\r\n # For now, we must squeeze the results of the device execution, since\r\n # qml.probs results in a nested result. Later, we will revisit device\r\n # execution to avoid this issue.\r\n res = fn(qml.math.squeeze(dev.batch_execute(tapes)))\r\n assert res.shape == (4, 2)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()",
"def test_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n X q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.0, 0.0, 0.0, 1.0]).all()",
"def test_initial_state_raises(self):\n initial_state = QuantumCircuit(2)\n initial_state.x(0)\n\n problem = TimeEvolutionProblem(\n self.hamiltonian,\n time=0.02,\n initial_state=initial_state,\n )\n\n sampler = Sampler()\n fidelity_primitive = ComputeUncompute(sampler)\n\n pvqd = PVQD(\n fidelity_primitive,\n self.ansatz,\n self.initial_parameters,\n optimizer=SPSA(maxiter=0, learning_rate=0.1, perturbation=0.01),\n )\n\n with self.assertRaises(NotImplementedError):\n _ = pvqd.evolve(problem)",
"def test_non_integral_validation(self):",
"def test_non_integral_validation(self):",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n register q2[2]\n register q3[3]\n X q2\n CNOT q2 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array)\n assert isclose(result, [1.0, 0.0, 1.0, 0.0]).all()",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_foo(self):\n self.ran = True\n 1 / 0",
"def test_superposition_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_special_PSX(self, angexp):\n a, b, c = angexp[0]\n tgt = U3Gate(a, b, c).to_matrix()\n exp = {(\"p\", \"sx\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n self.check_oneq_special_cases(tgt, \"PSX\", exp)",
"def test_thermal_relaxation_error_gate(self):\n t1, t2, time, p1 = (2, 1, 1, 0.3)\n error = thermal_relaxation_error(t1, t2, time, p1)\n targets = [[{'name': 'id', 'qubits': [0]}],\n [{'name': 'z', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}, {'name': 'x', 'qubits': [0]}]]\n p_reset0 = (1 - p1) * (1 - np.exp(-1 / t1))\n p_reset1 = p1 * (1 - np.exp(-1 / t1))\n p_z = 0.5 * np.exp(-1 / t1) * (1 - np.exp(-(1 / t2 - 1 / t1) * time))\n p_id = 1 - p_z - p_reset0 - p_reset1\n for j in range(4):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, targets)\n name = circ[0]['name']\n if circ[0]['name'] == 'id':\n self.assertAlmostEqual(p, p_id, msg=\"identity probability\")\n elif name == 'z':\n self.assertAlmostEqual(p, p_z, msg=\"Z error probability\")\n elif len(circ) == 1:\n self.assertAlmostEqual(p, p_reset0, msg=\"reset-0 probability\")\n else:\n self.assertAlmostEqual(p, p_reset1, msg=\"reset-1 probability\")\n self.assertEqual(targets, [], msg=\"relaxation circuits\")",
"def phase_seperation_detection(g_x_func, s, p, P, T, n=100, LLE_only=False,\n VLE_only=False): \n # TODO: Update this documentation\n \n # Generate sampling points.\n import numpy\n from UQToolbox.sobol_lib import i4_sobol_generate\n from tgo import tgo\n m = p.m['n'] - 1\n skip = 4\n Points = i4_sobol_generate(m, n, skip)\n Points = numpy.column_stack([Points[i] for i in range(m)])\n Points = Points[numpy.sum(Points, axis=1) <= 1.0]\n S = numpy.empty(n, dtype=bool)\n\n # Update P, T to specified value\n print Points[0]\n s = s.update_state(s, p, P=P, T=T, X = Points[0], Force_Update=True)\n \n def subset_eqp(Points, X_I, X_II):\n # Retunrs a subset of \"Points\" outside EQP\n import numpy\n for i in range(p.m['n']-1):\n P_new_low = Points[Points[:,i] < \n min(X_I[i], X_II[i])]\n \n P_new_high = Points[Points[:,i] > \n max(X_I[i], X_II[i])] \n \n return numpy.append(P_new_low, P_new_high, axis=0)\n \n \n # Detect instability in a same volume root phase:\n if not VLE_only:\n # define LLE instability func\n def instability_point_calc(Points, g_x_func, s, p, n, k, P=P, T=T):\n # Find an instability point, calculated equilibrium and return\n # new feasible subset.\n Stop = False # Boolean to run main while loop\n for i, X in zip(range(n), Points):\n # Test for instability at current equilibrium point.\n S[i] = stability(X, g_x_func, s, p, k=ph)\n if not S[i]: # If point is unstable find equilibrium point.\n s = phase_equilibrium_calculation(s, p, g_x_func, X, k=k,\n P=P, T=T, \n tol=1e-9, \n Print_Results=False, \n Plot_Results=False) \n \n s.m['ph equil P'] = [s.m['X_I'], s.m['X_II']]\n # TODO: Improve finding feasible subspace of points.\n P_new = Points[(i+1):]\n\n P_new = subset_eqp(P_new, s.m['X_I'], s.m['X_II'])\n \n # Stop if no values in subset\n if numpy.shape(P_new)[0] == 0: \n Stop = True\n \n return P_new, s.m['ph equil P'], Stop\n \n # If no instability was found, stop the main for loop and set eq.\n # point to None.\n s.m['ph equil P'] = None\n Stop = True\n return Points, s.m['ph equil P'], Stop\n \n # Main looping\n s.m['ph equil'] = {} # Range of equilibrium points.\n for ph in p.m['Valid phases']:\n Stop = False\n s.m['ph equil'][ph] = []\n while not Stop:\n Points, s.m['ph equil P'], Stop = instability_point_calc(\n Points, \n g_x_func, \n s, p, n, ph)\n \n # Save an equilibrium point to the range of points in the\n # current phase if found.\n if s.m['ph equil P'] is not None: \n s.m['ph equil'][ph].append(s.m['ph equil P'])\n \n \n # Detect phase seperation accross volume root phases:\n if not LLE_only:\n # Define difference function\n def g_diff(X, g_x_func, s, p, ph1, ph2, ref):\n # Returns difference between Gibbs energy of phases 'ph1' & 'ph2'\n # Note, all phases must be at same composition for meaningful \n # comparison\n s = s.update_state(s, p, X = X, Force_Update=True)\n return (g_x_func(s, p, k=ph1, ref=ref).m['g_mix'][ph1]\n - g_x_func(s, p, k=ph2, ref=ref).m['g_mix'][ph2])\n \n \n # Define objective function for feed search\n def g_diff_obj(X, g_x_func, s, p, ph1, ph2, ref):\n # Returns difference between Gibbs energy of phases 'ph1' & 'ph2'\n # Note, all phases must be at same composition for meaningful \n # comparison\n s = s.update_state(s, p, X = X, Force_Update=True)\n return abs(g_x_func(s, p, k=ph1, ref=ref).m['g_mix'][ph1]\n - g_x_func(s, p, k=ph2, ref=ref).m['g_mix'][ph2])\n \n # Calculated difference of Gibbs energies between all phases at all\n # sampling points.\n s.m['mph equil'] = []\n s.m['mph phase'] = []\n for i in range(len(p.m['Valid 
phases'])):\n for j in range(i + 1, len(p.m['Valid phases'])):\n ph1 = p.m['Valid phases'][i]\n ph2 = p.m['Valid phases'][j]\n print ph1\n print ph2\n Fd = numpy.empty(n)\n for l, X in zip(range(n), Points):\n Fd[l] = g_diff(X, g_x_func, s, p, ph1, ph2, ph1)\n \n # Look for sign cross phase seperation\n if not numpy.all(Fd > 0) or numpy.all(Fd < 0):\n # (if all values are not greater than or less than zero)\n Bounds = [(1e-6, 0.99999)]\n Args=(g_x_func, s, p, ph1, ph2, ph1)\n Z_0 = tgo(g_diff_obj, Bounds, args=Args, n=1000, k_t = 5)\n print Z_0\n \n s = phase_equilibrium_calculation(s, p, g_x_func, Z_0,\n P=P, T=T, \n tol=1e-2, \n Print_Results=True, \n Plot_Results=True) \n \n s.m['mph equil P'] = [s.m['X_I'], s.m['X_II']]\n s.m['mph equil'].append(s.m['mph equil P'])\n s.m['mph phase'].append([s.m['Phase eq. I'], \n s.m['Phase eq. II']])\n return s",
"def test_cx_equivalence_0cx(self, seed=0):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=6)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))",
"def test_thermal_relaxation_error_raises_invalid_t1(self):\n # T1 == 0\n self.assertRaises(NoiseError, lambda: thermal_relaxation_error(0, 0, 0))\n # T1 < 0\n self.assertRaises(NoiseError, lambda: thermal_relaxation_error(-0.1, 0.1, 0))",
"def test_thermal_relaxation_error_t1_t2_inf_ideal(self):\n error = thermal_relaxation_error(np.inf, np.inf, 0)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]},\n msg=\"ideal circuit\")",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_gradient_exception_on_sample(self):\n dev = qml.device(\"default.qubit\", wires=2, shots=1000)\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Circuits that include sampling can not be differentiated.\",\n ):\n grad_fn = autograd.jacobian(circuit)\n grad_fn(1.0)",
"def act(self, state, eps=0.):",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def test_source_error(self):\n # reproducible arbitrariness\n np.random.seed(12321)\n\n nsteps = 10\n nchan = 3\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class SourceErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.source.N))\n\n def evolve(self, t, dt):\n i = int_r(t/dt)\n self.motor_error[i, :] = self.target.get_source_error()\n\n M = SourceErrorGrabber(controller)\n M1 = simulation.StateMonitor(controller, 'out')\n\n sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)\n sim.run(tmax)\n\n for i in xrange(int_r(tmax/self.dt)):\n diff = M1.out[:, i] - target[:, i]\n self.assertTrue(np.allclose(M.motor_error[i],\n np.dot(diff, controller.W)))",
"def test_cx_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.cx_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test_non_pauli_error(self):\n circuit = hadamard_circuit(3)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n H = qml.Hadamard(0) @ qml.Hadamard(2)\n\n msg = \"Observable must be a linear combination of Pauli observables\"\n with pytest.raises(ValueError, match=msg):\n shadow.expval(H, k=10)",
"def OF0_TestFunction_SimpleParabolic(x):\n return x ** 2",
"def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0"
]
| [
"0.6160264",
"0.60424984",
"0.60073066",
"0.5985922",
"0.5964086",
"0.58918685",
"0.5890566",
"0.5871174",
"0.5871174",
"0.584095",
"0.58154696",
"0.5802302",
"0.57217896",
"0.5697638",
"0.56964415",
"0.56962276",
"0.56796205",
"0.5670367",
"0.56680375",
"0.56564397",
"0.5653369",
"0.5653351",
"0.5630583",
"0.56222785",
"0.5621404",
"0.5603777",
"0.5598769",
"0.5597561",
"0.5570402",
"0.55600023"
]
| 0.7001833 | 0 |
Test 25% PauliX error on measure of qubit1 | def test_specific_qubit_pauli_error_measure_25percent(self):
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 2000
# test noise model
error = pauli_error([('X', 0.25), ('I', 0.75)])
noise_model = NoiseModel()
noise_model.add_quantum_error(error, 'measure', [1])
# Execute
target = {'0x0': 3 * shots / 4, '0x2': shots / 4}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'reset', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def run_single_qubit_measure():\n\n # Construct Hamiltonian.\n a = random.random()\n b = random.random()\n c = random.random()\n hamil = (a * ops.PauliX() + b * ops.PauliY() + c * ops.PauliZ())\n\n # Compute known minimum eigenvalue.\n eigvals = np.linalg.eigvalsh(hamil)\n\n min_val = 1000.0\n for i in range(0, 360, 5):\n for j in range(0, 180, 5):\n\n theta = np.pi * i / 360.0\n phi = np.pi * j / 180.0\n\n # X Basis\n qc = single_qubit_ansatz(theta, phi)\n qc.h(0)\n val_a = a * qc.pauli_expectation(0)\n\n # Y Basis\n qc = single_qubit_ansatz(theta, phi)\n qc.sdag(0)\n qc.h(0)\n val_b = b * qc.pauli_expectation(0)\n\n # Z Basis\n qc = single_qubit_ansatz(theta, phi)\n val_c = c * qc.pauli_expectation(0)\n\n expectation = val_a + val_b + val_c\n if expectation < min_val:\n min_val = expectation\n\n print('Minimum eigenvalue: {:.3f}, Delta: {:.3f}'\n .format(eigvals[0], min_val - eigvals[0]))",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_measure(self):\n\n result = qubit.measure(polarization)\n self.assertEqual(0, result)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)",
"def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_qqlnu_np_scalar(self):\n\n R_mg = np.asarray([1.00938347, 1.01333147, 1.01762706])\n # Number of NP events generated in MG [25603, 15708, 9833]\n # Number of SM events generated in MG [23536, 10207, 4851]\n\n bins = np.asarray([1200., 1400., 1600., 1800.])\n nbins = len(bins)-1\n\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n wc = wcxf.WC('SMEFT', 'Warsaw up', center, {'lequ1_2232': 1e-7})\n wc_obj = flavio.WilsonCoefficients()\n wc_obj.set_initial_wcxf(wc)\n R = pplnu.R_sigma_qqlnu_int(13e3**2, bins[i], bins[i+1], 'mu', wc_obj, par2)\n err = (R-R_mg[i])/R_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')",
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_set_1(self):\n\n qubit.set(1, 0)\n\n result = qubit.measure(polarization)\n self.assertEqual(1, result)",
"def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def sum_simulated_test():\n f = open(\"./results/simulated_sigmoid_sum.csv\", \"w\")\n #f1 = open(\"./results/avg_pres.txt\", \"w\")\n #f.write(\"num. of qubits; precision\\n\")\n\n\n computable_qubits = 27\n num_subtest = 1000\n\n acum_precision = 0\n coeffs = []\n temp = -10\n while temp < 11:\n coeffs.append(temp)\n temp += 0.25\n #for coeff in coeffs:\n # variables.c_summation = coeff\n # print(coeff)\n for i in range(2, computable_qubits):\n #print(\"qubit: \", i)\n precision = 0\n x = []\n for j in range(num_subtest):\n\n random_dict = get_random_dict(i)\n\n # compute real answer\n real_answer = 0\n for value in random_dict.values():\n real_answer += value\n # f1.write(str(real_answer)+\";\")\n x.append(real_answer)\n\n # assign spin value to real_answer\n if real_answer < 0:\n real_answer = -1\n elif real_answer > 0:\n real_answer = 1\n else:\n real_answer = 0\n bqm = get_bqm()\n quantum_sigmoid_sum(bqm, random_dict, \"target\")\n sampler = get_simulated_sampler()\n result = sampler.sample(bqm)\n if real_answer == 0:\n precision += 1\n # f1.write(\"1\\n\")\n elif real_answer == result.first.sample['target']:\n precision += 1\n # f1.write(\"1\\n\")\n# else:\n # f1.write(\"0\\n\")\n\n precision /= num_subtest\n # acum_precision+= precision\n\n f.write(str(i) + \";\" + str(precision) + \"\\n\")\n f.close()\n #f1.write(str(coeff)+\";\"+ str(round(acum_precision/(computable_qubits-1), 4)) + \"\\n\")\n # acum_precision = 0\n #f1.close()",
"def test_9(self):\n\n sq_qe = gen_step_qe(1.42, 0.9)\n test_ill = Illumination()\n # test_qef = qe_filter(sq_qe)\n\n filtered_ill = test_ill * sq_qe\n\n assert isinstance(filtered_ill, Illumination)\n\n #plt.plot(filtered_ill.get_spectrum('eV')[0, :], filtered_ill.get_spectrum('eV')[1, :], label=\"filtered\")\n #plt.plot(test_ill.get_spectrum('eV')[0, :], test_ill.get_spectrum('eV')[1, :], label=\"original\")\n\n #plt.xlabel('wavelength (eV)')\n #plt.ylabel('spectrum (W/eV/m^2)')\n\n #plt.legend()\n\n #plt.show()",
"def test_quintic2(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array7), 0.0)",
"def quasi_optimalityTV(f, lam_init = 2.0, q = 0.9):\n \n lam = lam_init\n max_iter = 50\n error = np.zeros(max_iter)\n #alt_error = np.zeros(max_iter)\n u_old = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n for i in range(1, max_iter):\n lam = lam_init * (q ** i)\n u_new = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n error[i] = np.linalg.norm(u_old - u_new)\n #alt_error[i] = np.linalg.norm(u_old - u_new) /abs(lam_init*(q ** i - q ** (i-1)))\n u_old = np.copy(u_new)\n\n #plt.plot(error)\n #plt.plot(alt_error)\n #plt.show()\n opt_idx = np.argmin(error[error != 0.0])\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u= ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n \n return u, t",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_invalid_calculation_of_quantile(alpha: Any) -> None:\n n = 10\n with pytest.raises(\n ValueError, match=r\".*Number of samples of the score is too low*\"\n ):\n check_alpha_and_n_samples(alpha, n)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]"
]
| [
"0.7395869",
"0.705778",
"0.6959764",
"0.6951617",
"0.69448733",
"0.6924721",
"0.6907305",
"0.6776238",
"0.67016417",
"0.66793114",
"0.666238",
"0.65331316",
"0.63741654",
"0.6357188",
"0.6355327",
"0.63139987",
"0.63046926",
"0.63001764",
"0.6269647",
"0.6225133",
"0.6215649",
"0.61829716",
"0.61455035",
"0.6108052",
"0.6078931",
"0.6045601",
"0.6044009",
"0.60361844",
"0.6015167",
"0.59661907"
]
| 0.74995834 | 0 |
Test 25% PauliX error on reset of qubit1 | def test_specific_qubit_pauli_error_reset_25percent(self):
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.barrier(qr)
circuit.reset(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
backend = QasmSimulator()
shots = 2000
# test noise model
error = pauli_error([('X', 0.25), ('I', 0.75)])
noise_model = NoiseModel()
noise_model.add_quantum_error(error, 'reset', [1])
# Execute
target = {'0x0': 3 * shots / 4, '0x2': shots / 4}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_all_qubit_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'reset')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_specific_qubit_25percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 25% reset noise on qubit-1 \"u3\" only.\n noise_probs = [0.25, 0.75]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [1])\n shots = 2000\n # target = {'01': shots / 4, '11': 3 * shots / 4}\n target = {'0x1': shots / 4, '0x3': 3 * shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_reset_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.barrier(qr)\n circuit.reset(qr[1])\n circuit.barrier(qr)\n circuit.reset(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'reset', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset_error_all_qubit_100percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 100% reset noise on all qubit \"u3\".\n noise_probs = [1, 0]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, \"u3\")\n shots = 100\n # target = {'00': shots}\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_reset_error_specific_qubit_50percent(self):\n\n # Test circuit: ideal outcome \"11\"\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n noise_circs = [[{\n \"name\": \"reset\",\n \"qubits\": [0]\n }], [{\n \"name\": \"id\",\n \"qubits\": [0]\n }]]\n\n # 50% reset noise on qubit-0 \"u3\" only.\n noise_probs = [0.5, 0.5]\n error = QuantumError(zip(noise_circs, noise_probs))\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, \"u3\", [0])\n shots = 2000\n target = {'0x2': shots / 2, '0x3': shots / 2}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {'0x3': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_gate_100percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = pauli_error([('X', 1)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [1])\n # Execute\n target = {'0x2': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_specific_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'measure', [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_all_qubit_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'measure')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset1_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(0, 1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x1': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_nonlocal_pauli_error_measure_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n # use barrier to ensure measure qubit 0 is before qubit 1\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'measure', [0], [1])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x2': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0reset1_error_50percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr[0])\n circuit.x(qr[1])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = reset_error(0.25, 0.25)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {\n '0x0': 3 * shots / 16,\n '0x1': shots / 16,\n '0x2': 9 * shots / 16,\n '0x3': 3 * shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_nonlocal_pauli_error_gate_25percent(self):\n qr = QuantumRegister(3, 'qr')\n cr = ClassicalRegister(3, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.cx(qr[0], qr[1])\n circuit.barrier(qr)\n circuit.cx(qr[1], qr[0])\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('XII', 0.25), ('III', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_nonlocal_quantum_error(error, 'cx', [0, 1], [0, 1, 2])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x4': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_standard_reset0_error_100percent(self):\n qr = QuantumRegister(1, 'qr')\n cr = ClassicalRegister(1, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.x(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 100\n # test noise model\n error = reset_error(1)\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, ['id', 'x'])\n # Execute\n target = {'0x0': shots}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0)",
"def test_readout_error_all_qubit(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_all_qubit_readout_error([probs_given0, probs_given1])\n\n # Expected counts\n shots = 2000\n p00 = 0.5 * (probs_given0[0]**2 + probs_given1[0]**2)\n p01 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p10 = 0.5 * (probs_given0[0] * probs_given0[1] +\n probs_given1[0] * probs_given1[1])\n p11 = 0.5 * (probs_given0[1]**2 + probs_given1[1]**2)\n target = target = {\n '0x0': p00 * shots,\n '0x1': p01 * shots,\n '0x2': p10 * shots,\n '0x3': p11 * shots\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_readout_error_qubit1(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [1])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given1[0] * shots / 2,\n '0x2': probs_given0[1] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def experiment3():\n raise FAKE_ERROR",
"def test_readout_error_qubit0(self):\n\n # Test circuit: ideal bell state\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n # Ensure qubit 0 is measured before qubit 1\n circuit.barrier(qr)\n circuit.measure(qr[0], cr[0])\n circuit.barrier(qr)\n circuit.measure(qr[1], cr[1])\n backend = QasmSimulator()\n\n # Asymetric readout error on qubit-0 only\n probs_given0 = [0.9, 0.1]\n probs_given1 = [0.3, 0.7]\n noise_model = NoiseModel()\n noise_model.add_readout_error([probs_given0, probs_given1], [0])\n\n shots = 2000\n target = {\n '0x0': probs_given0[0] * shots / 2,\n '0x1': probs_given0[1] * shots / 2,\n '0x2': probs_given1[0] * shots / 2,\n '0x3': probs_given1[1] * shots / 2\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')",
"def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))",
"def test_set_1(self):\n\n qubit.set(1, 0)\n\n result = qubit.measure(polarization)\n self.assertEqual(1, result)",
"def test_foo(self):\n self.ran = True\n 1 / 0",
"def test_error_num_qubits(self, basis_state, wires):\n\n with pytest.raises(ValueError, match=\"'basis_state' must be of shape\"):\n BasisStatePreparation(basis_state, wires)",
"def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '0% [....................]')",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def run_single_qubit_measure():\n\n # Construct Hamiltonian.\n a = random.random()\n b = random.random()\n c = random.random()\n hamil = (a * ops.PauliX() + b * ops.PauliY() + c * ops.PauliZ())\n\n # Compute known minimum eigenvalue.\n eigvals = np.linalg.eigvalsh(hamil)\n\n min_val = 1000.0\n for i in range(0, 360, 5):\n for j in range(0, 180, 5):\n\n theta = np.pi * i / 360.0\n phi = np.pi * j / 180.0\n\n # X Basis\n qc = single_qubit_ansatz(theta, phi)\n qc.h(0)\n val_a = a * qc.pauli_expectation(0)\n\n # Y Basis\n qc = single_qubit_ansatz(theta, phi)\n qc.sdag(0)\n qc.h(0)\n val_b = b * qc.pauli_expectation(0)\n\n # Z Basis\n qc = single_qubit_ansatz(theta, phi)\n val_c = c * qc.pauli_expectation(0)\n\n expectation = val_a + val_b + val_c\n if expectation < min_val:\n min_val = expectation\n\n print('Minimum eigenvalue: {:.3f}, Delta: {:.3f}'\n .format(eigvals[0], min_val - eigvals[0]))",
"def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)",
"def reset(self):\n self.error_p = 0.0\n self.error_i = 0.0\n self.error_d = 0.0\n self.errors = [ 0.0 ] * self.samples\n if callable(self.debug_callback):\n self.debug_callback(\"reset\")"
]
| [
"0.80031896",
"0.76712894",
"0.7635047",
"0.75500256",
"0.7529674",
"0.7217046",
"0.71857226",
"0.71389174",
"0.71040606",
"0.7099653",
"0.70686287",
"0.7023759",
"0.69967765",
"0.6946481",
"0.6828879",
"0.6803246",
"0.66288227",
"0.6546882",
"0.64287186",
"0.63982475",
"0.6220329",
"0.60568637",
"0.60484695",
"0.601186",
"0.59908944",
"0.5981152",
"0.5977596",
"0.59681976",
"0.59606236",
"0.5906314"
]
| 0.8109222 | 0 |
Test amplitude damping error damps to correct state | def test_amplitude_damping_error(self):
qr = QuantumRegister(1, 'qr')
cr = ClassicalRegister(1, 'cr')
circuit = QuantumCircuit(qr, cr)
        circuit.x(qr)  # prepare excited |1> state
for _ in range(30):
# Add noisy identities
circuit.barrier(qr)
circuit.iden(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
shots = 2000
backend = QasmSimulator()
# test noise model
error = amplitude_damping_error(0.75, 0.25)
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(error, 'id')
# Execute
target = {'0x0': 3 * shots / 4, '0x1': shots / 4}
circuit = transpile(circuit, basis_gates=noise_model.basis_gates)
qobj = assemble([circuit], backend, shots=shots)
result = backend.run(qobj, noise_model=noise_model).result()
self.is_completed(result)
self.compare_counts(result, [circuit], [target], delta=0.05 * shots) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_amplitude_damping_error_raises_invalid_excited_state_pop(self):\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(0, 0, -0.5))\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(0, 0, 1.1))",
"def test_amplitude_damping_error_raises_invalid_amp_param(self):\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(-0.5, 0, 0))\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(1.1, 0, 0))",
"def test_amplitude_damping_error_full_1state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=1,\n canonical_kraus=False)\n targets = [np.diag([0, 1]), np.array([[0, 0], [1, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_amplitude_damping_error_full_0state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=False)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_amplitude_damping_error_raises_invalid_combined_params(self):\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(0.5, 0.6, 0))",
"def test_amplitude_damping_error_full_1state_canonical(self):\n error = amplitude_damping_error(1, excited_state_population=1,\n canonical_kraus=True)\n targets = [np.diag([0, 1]), np.array([[0, 0], [1, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_amplitude_damping_error_full_0state_canonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=True)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_phase_amplitude_damping_error_noncanonical(self):\n error = phase_amplitude_damping_error(0.25, 0.5, 0.3, canonical_kraus=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n self.assertEqual(len(circ[0]['params']), 6,\n msg=\"Incorrect number of kraus matrices\")",
"def test_amplitude_damping_error_ideal_noncanonical(self):\n error = amplitude_damping_error(0, excited_state_population=0.5,\n canonical_kraus=False)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]},\n msg=\"ideal circuit\")",
"def test_amplitude_damping_error_raises_invalid_phase_param(self):\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(0, -0.5, 0))\n self.assertRaises(NoiseError,\n lambda: phase_amplitude_damping_error(0, 1.1, 0))",
"def test_phase_amplitude_damping_error_canonical(self):\n error = phase_amplitude_damping_error(0.25, 0.5, 0.3, canonical_kraus=True)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n self.assertEqual(len(circ[0]['params']), 4,\n msg=\"Incorrect number of kraus matrices\")",
"def test_phase_damping_error_ideal(self):\n error = phase_damping_error(0)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"ideal probability\")\n self.assertEqual(circ[0], {\"name\": \"id\", \"qubits\": [0]},\n msg=\"ideal circuit\")",
"def calc_error_amp(amp_pred, pdur, model):\n theta_pred = list(forward_pass(model, pdur, amp_pred))[0]\n return np.log(np.maximum(1e-10, (theta_pred - model.theta) ** 2))",
"def test_phase_damping_error_full_noncanonical(self):\n error = phase_damping_error(1, canonical_kraus=False)\n circ, p = error.error_term(0)\n targets = [np.diag([1, 0]), np.diag([0, 1])]\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_phase_damping_error_noncanonical(self):\n p_phase = 0.3\n error = phase_damping_error(0.3, canonical_kraus=False)\n circ, p = error.error_term(0)\n targets = [np.array([[1, 0], [0, np.sqrt(1 - p_phase)]]),\n np.array([[0, 0], [0, np.sqrt(p_phase)]])]\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_fold_along_delay_amplitude_check():\n delays = np.arange(-10, 11) * units.s\n array = np.ones((1, 10, 21)) * units.mK**2 * units.Mpc**3\n array[:, :, 11:] *= np.sqrt(2)\n array[:, :, 10] *= 3\n axis = -1\n errs = np.ones_like(array)\n array_out, errs_out = utils.fold_along_delay(delays, array, errs, axis=axis)\n test_value_array = np.ones((1, 10, 11)) * np.mean([np.sqrt(2), 1])\n test_value_array[:, :, 0] = 3\n assert np.allclose(test_value_array, array_out.value)",
"def test_phase_damping_error_full_canonical(self):\n error = phase_damping_error(1, canonical_kraus=True)\n circ, p = error.error_term(0)\n targets = [np.diag([1, 0]), np.diag([0, 1])]\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")",
"def test_tensor_network_amplitude(self, state, want_amplitude):\n circuit = jet.Circuit(num_wires=2)\n circuit.append_gate(jet.GateFactory.create(\"H\"), wire_ids=[0])\n circuit.append_gate(jet.GateFactory.create(\"CNOT\"), wire_ids=[0, 1])\n circuit.append_state(state, wire_ids=[0, 1])\n have_amplitude = circuit.tensor_network().contract().scalar\n assert have_amplitude == want_amplitude",
"def test_single_ended_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections, st_var=1.0, ast_var=1.0, method=\"wls\", solver=\"sparse\"\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)\n\n pass",
"def test4(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 0, 1])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == -1)",
"def test3(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 1, 0, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 0)",
"def test9(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 0, 1, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == -1)",
"def test_loguniform(self):\n\n times = np.logspace(-4, -2, 3)\n\n waveObj1 = vrm.waveforms.StepOff(t0=0.0)\n waveObj2 = vrm.waveforms.SquarePulse(delt=0.02)\n\n chi0 = np.array([0.0])\n dchi = np.array([0.01])\n tau1 = np.array([1e-10])\n tau2 = np.array([1e3])\n\n decay1b = (dchi / np.log(tau2 / tau1)) * waveObj2.getCharDecay(\"b\", times)\n decay2b = waveObj2.getLogUniformDecay(\"b\", times, chi0, dchi, tau1, tau2)\n\n decay1dbdt = (dchi / np.log(tau2 / tau1)) * waveObj1.getCharDecay(\"dbdt\", times)\n decay2dbdt = waveObj1.getLogUniformDecay(\"dbdt\", times, chi0, dchi, tau1, tau2)\n decay3dbdt = (dchi / np.log(tau2 / tau1)) * waveObj2.getCharDecay(\"dbdt\", times)\n decay4dbdt = waveObj2.getLogUniformDecay(\"dbdt\", times, chi0, dchi, tau1, tau2)\n\n err1 = np.max(np.abs((decay2b - decay1b) / decay1b))\n err2 = np.max(np.abs((decay2dbdt - decay1dbdt) / decay1dbdt))\n err3 = np.max(np.abs((decay4dbdt - decay3dbdt) / decay3dbdt))\n\n self.assertTrue(err1 < 0.01 and err2 < 0.01 and err3 < 0.01)",
"def idealOpAmp():",
"def test5(self):\n sig1 = np.array([0, 0, 1])\n sig2 = np.array([0, 1, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 1)",
"def test7(self):\n sig1 = np.array([0, 0, 1])\n sig2 = np.array([0, 1, 0, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 1)",
"def test_noise_amplitude():\n test_sample = np.ones((100, 1000)) * 3\n test_noise = utils.generate_noise(test_sample)\n noise_power = test_noise.std(1)\n noise_power_uncertainty = noise_power.std()\n assert np.isclose(test_noise.std(), 3, atol=noise_power_uncertainty)",
"def test6(self):\n sig1 = np.array([0, 0, 0, 1])\n sig2 = np.array([0, 0, 1])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 1)",
"def test_phase_damping_error_canonical(self):\n p_phase = 0.3\n error = phase_damping_error(p_phase, canonical_kraus=True)\n # The canonical form of this channel should be a mixed\n # unitary dephasing channel\n targets = [[{'name': 'z', 'qubits': [0]}],\n [{'name': 'id', 'qubits': [0]}]]\n for j in range(2):\n circ, p = error.error_term(j)\n self.assertEqual(circ[0][\"qubits\"], [0])\n self.remove_if_found(circ, targets)\n self.assertEqual(targets, [], msg=\"Incorrect canonical circuits\")",
"def test_single_ended_wls_fix_gamma_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=10)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)\n\n pass"
]
| [
"0.7397735",
"0.7351404",
"0.7334159",
"0.7277401",
"0.716212",
"0.7102862",
"0.7066937",
"0.7050793",
"0.7002974",
"0.6957776",
"0.67443824",
"0.6685103",
"0.63724357",
"0.629844",
"0.62963206",
"0.62188",
"0.61539865",
"0.6148564",
"0.6147389",
"0.6135113",
"0.6122521",
"0.61092556",
"0.6080257",
"0.60788",
"0.606583",
"0.60640734",
"0.6050078",
"0.60256284",
"0.6013539",
"0.6010585"
]
| 0.7710369 | 0 |
Test noise model basis_gates | def test_noise_model_basis_gates(self):
basis_gates = ['u1', 'u2', 'u3', 'cx']
model = NoiseModel(basis_gates)
target = sorted(basis_gates)
self.assertEqual(model.basis_gates, target)
# Check adding readout errors doesn't add to basis gates
model = NoiseModel(basis_gates)
target = sorted(basis_gates)
model.add_all_qubit_readout_error([[0.9, 0.1], [0, 1]], False)
self.assertEqual(model.basis_gates, target)
model.add_readout_error([[0.9, 0.1], [0, 1]], [2], False)
self.assertEqual(model.basis_gates, target)
# Check a reset instruction error isn't added to basis gates
model = NoiseModel(basis_gates)
target = sorted(basis_gates)
model.add_all_qubit_quantum_error(reset_error(0.2), ['reset'], False)
self.assertEqual(model.basis_gates, target)
# Check a non-standard gate isn't added to basis gates
model = NoiseModel(basis_gates)
target = sorted(basis_gates)
model.add_all_qubit_quantum_error(reset_error(0.2), ['label'], False)
self.assertEqual(model.basis_gates, target)
# Check a standard gate is added to basis gates
model = NoiseModel(basis_gates)
target = sorted(basis_gates + ['h'])
model.add_all_qubit_quantum_error(reset_error(0.2), ['h'], False)
self.assertEqual(model.basis_gates, target) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)",
"def test_Gaussian_NB_estimators():",
"def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x",
"def test_self_consistency_noise(self):\n # test with SNR = 100\n SNR = self.p_gt[0] / 9\n noisy_data = self.data + SNR * RNG.normal(size=self.data.shape)\n popt, pcov = sine_fit(noisy_data, self.periods)\n assert_allclose(*fixed_signs(self.p_gt, popt), 5e-1)",
"def test_noise_models_not_equal(self):\n error = pauli_error([['X', 1]])\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error, ['u3'], False)\n\n model2 = NoiseModel(basis_gates=['u3', 'cx'])\n model2.add_all_qubit_quantum_error(error, ['u3'], False)",
"def generate_with_noise(self, noise):\n with tf.compat.v1.Session() as sess:\n self.saver.restore(sess, \"data/\" + self.name + \".ckpt\")\n return self.X_fake.eval(feed_dict={self.z: noise, self.is_training: False})",
"def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals",
"def test_sdg_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.sdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.sdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)",
"def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()",
"def test_raw_predict_numerical_stability(self):\n\n # set seed for reproducability\n np.random.seed(3)\n # Definition of the Branin test function\n def branin(X):\n y = (X[:,1]-5.1/(4*np.pi**2)*X[:,0]**2+5*X[:,0]/np.pi-6)**2\n y += 10*(1-1/(8*np.pi))*np.cos(X[:,0])+10\n return(y)\n # Training set defined as a 5*5 grid:\n xg1 = np.linspace(-5,10,5)\n xg2 = np.linspace(0,15,5)\n X = np.zeros((xg1.size * xg2.size,2))\n for i,x1 in enumerate(xg1):\n for j,x2 in enumerate(xg2):\n X[i+xg1.size*j,:] = [x1,x2]\n Y = branin(X)[:,None]\n # Fit a GP\n # Create an exponentiated quadratic plus bias covariance function\n k = GPy.kern.RBF(input_dim=2, ARD = True)\n # Build a GP model\n m = GPy.models.GPRegression(X,Y,k)\n # fix the noise variance\n m.likelihood.variance.fix(1e-5)\n # Randomize the model and optimize\n m.randomize()\n m.optimize()\n # Compute the mean of model prediction on 1e5 Monte Carlo samples\n Xp = np.random.uniform(size=(int(1e5),2))\n Xp[:,0] = Xp[:,0]*15-5\n Xp[:,1] = Xp[:,1]*15\n _, var = m.predict(Xp)\n self.assertTrue(np.all(var>=0.))",
"def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals",
"def test_deterministic(self):\n add_noise = self.variant(exploration.add_ornstein_uhlenbeck_noise)\n # Test that noisy and noisless actions match for zero stddev\n noise_tm1 = np.zeros((self._num_actions,))\n for _ in range(10):\n action = np.random.normal(0., 1., self._num_actions)\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_action = add_noise(key, action, noise_tm1, 1., 0.)\n noise_tm1 = action - noisy_action\n np.testing.assert_allclose(action, noisy_action)",
"def test_s_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.s_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test_sdg_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.sdg_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.sdg_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def _gauss_noise(self, shape):\n\n n = np.random.normal(0, 1, shape)\n return self.e*n",
"def gen_in_weights(self):\n\n gen = Generator(device = self.device).manual_seed(self.random_seed)\n n, m = self.n_nodes_, self.n_inputs_\n in_w_shape_ = (n, m)\n print('m,n', m,n)\n\n #at the moment all input weight matrices use uniform bias.\n self.bias = rand( n, 1, generator = gen, device = self.device) * 2 - 1\n\n #weights\n if self.input_weight_type_ == \"uniform\":\n self.in_weights = rand((n,m), generator = gen, device = self.device)\n self.in_weights = self.in_weights * 2 - 1\n print('in_weights', self.in_weights.shape)\n\n elif self.input_weight_type_ == \"exponential\":\n printc(\"BUILDING SIGN_\", 'fail')\n sign1 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n sign2 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n\n self.sign_dual = (sign1, sign2)\n self.sign = np.concatenate((sign1, sign2), axis = 1)\n\n #regularization\n self.feedback_weights = rand(n, 1, **self.tensorArgs, generator = gen) * 2 - 1\n\n #regularization\n self.noise_z = normal(0, 1, size = (n, m), **self.tensorArgs, generator = gen)",
"def gnoise(mag, sigma, mu):\n noise = np.random.normal(mu,sigma,n)\n mag = mag + noise\n return mag, noise",
"def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)",
"def noise(self, stddev):\n #add noise to weights\n pass",
"def generator(noise_dim=NOISE_DIM):\n model = nn.Sequential(\n nn.Linear(noise_dim, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 784),#784\n nn.Tanh(),\n )\n return model",
"def test_deterministic(self):\n add_noise = self.variant(exploration.add_gaussian_noise)\n # Test that noisy and noisless actions match for zero stddev\n for _ in range(10):\n action = np.random.normal(0., 1., self._num_actions)\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_action = add_noise(key, action, 0.)\n np.testing.assert_allclose(action, noisy_action)",
"def test_s_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.s_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def model_gauss_noise(sigma, nx, ny=1, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.noise.gauss\", {\"sigma\":sigma})\n\treturn e",
"def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)",
"def test_grovers_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.grovers_circuit(final_measure=True,\n allow_sampling=True)\n targets = ref_algorithms.grovers_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def test_x_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.x_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.x_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def test_w_and_without():\n A = Node(\"A\", [\"B\"], {\"B\": np.array([[1,0],[1,.1]])})\n B = Node(\"B\", [], {})\n net = CyberNet([A,B])\n T=10\n data = gen_data(T, net, {\"A\": \"normal\", \"B\":\"normal\"})\n logn_fact = gen_logn_fact(data)\n pdata_no_a = prob_model_no_attacker(net, data, T, logn_fact)\n pdata_a = prob_model_given_data_times(net, data, {}, T, logn_fact,\n {\"A\": \"normal\",\n \"B\":\"normal\"})\n np.testing.assert_almost_equal(pdata_no_a, pdata_a)\n\n np.testing.assert_almost_equal(np.log(poisson.pmf(len(data[0]), 10)), pdata_a)",
"def sampling(args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean = 0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon",
"def _modified_decoherence_noise_model(\n gates: Sequence[Gate],\n T1: Union[Dict[int, float], float] = 30e-6,\n T2: Union[Dict[int, float], float] = 30e-6,\n gate_time_1q: float = 50e-9,\n gate_time_2q: float = 150e-09,\n ro_fidelity: Union[Dict[int, float], float] = 0.95,\n) -> NoiseModel:\n all_qubits = set(sum(([t.index for t in g.qubits] for g in gates), []))\n if isinstance(T1, dict):\n all_qubits.update(T1.keys())\n if isinstance(T2, dict):\n all_qubits.update(T2.keys())\n if isinstance(ro_fidelity, dict):\n all_qubits.update(ro_fidelity.keys())\n\n if not isinstance(T1, dict):\n T1 = {q: T1 for q in all_qubits}\n\n if not isinstance(T2, dict):\n T2 = {q: T2 for q in all_qubits}\n\n if not isinstance(ro_fidelity, dict):\n ro_fidelity = {q: ro_fidelity for q in all_qubits}\n\n \n kraus_maps = []\n for g in gates:\n targets = tuple(t.index for t in g.qubits)\n key = (g.name, tuple(g.params))\n if g.name in NO_NOISE:\n if not g.dd:\n g.gate_time = gate_time_1q\n continue\n matrix, _ = get_modified_noisy_gate(g.name, g.params)\n\n if len(targets) == 1:\n if g.gate_time == None:\n g.gate_time = gate_time_1q\n noisy_I = damping_after_dephasing(T1.get(targets[0], INFINITY), T2.get(targets[0], INFINITY),\n g.gate_time)\n else:\n if len(targets) != 2:\n raise ValueError(\"Noisy gates on more than 2Q not currently supported\")\n if g.gate_time == None:\n g.gate_time = gate_time_2q\n\n # note this ordering of the tensor factors is necessary due to how the QVM orders\n # the wavefunction basis\n noisy_I = tensor_kraus_maps(damping_after_dephasing(T1.get(targets[1], INFINITY),\n T2.get(targets[1], INFINITY),\n g.gate_time),\n damping_after_dephasing(T1.get(targets[0], INFINITY),\n T2.get(targets[0], INFINITY),\n g.gate_time))\n kraus_maps.append(KrausModel(g.name, tuple(g.params), targets,\n combine_kraus_maps(noisy_I, [matrix]),\n 1.0))\n aprobs = {}\n for q, f_ro in ro_fidelity.items():\n aprobs[q] = np.array([[f_ro, 1. - f_ro],\n [1. - f_ro, f_ro]])\n\n return NoiseModel(kraus_maps, aprobs)"
]
| [
"0.62887645",
"0.6246167",
"0.61668515",
"0.6104143",
"0.61000156",
"0.5940454",
"0.59359336",
"0.5919544",
"0.5901059",
"0.58504856",
"0.58291674",
"0.5822305",
"0.581121",
"0.5807476",
"0.57912207",
"0.5785746",
"0.577846",
"0.5721244",
"0.5677354",
"0.5672393",
"0.5672285",
"0.56699675",
"0.5669244",
"0.5663579",
"0.56553197",
"0.5640729",
"0.5607471",
"0.56048",
"0.5596675",
"0.55894864"
]
| 0.7889592 | 0 |
Test two noise models are equal | def test_noise_models_equal(self):
roerror = [[0.9, 0.1], [0.5, 0.5]]
error1 = pauli_error([['X', 1]], standard_gates=False)
error2 = pauli_error([['X', 1]], standard_gates=True)
model1 = NoiseModel()
model1.add_all_qubit_quantum_error(error1, ['u3'], False)
model1.add_quantum_error(error1, ['u3'], [2], False)
model1.add_nonlocal_quantum_error(error1, ['cx'], [0, 1], [3], False)
model1.add_all_qubit_readout_error(roerror, False)
model1.add_readout_error(roerror, [0], False)
model2 = NoiseModel()
model2.add_all_qubit_quantum_error(error2, ['u3'], False)
model2.add_quantum_error(error2, ['u3'], [2], False)
model2.add_nonlocal_quantum_error(error2, ['cx'], [0, 1], [3], False)
model2.add_all_qubit_readout_error(roerror, False)
model2.add_readout_error(roerror, [0], False)
self.assertEqual(model1, model2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_noise_models_not_equal(self):\n error = pauli_error([['X', 1]])\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error, ['u3'], False)\n\n model2 = NoiseModel(basis_gates=['u3', 'cx'])\n model2.add_all_qubit_quantum_error(error, ['u3'], False)",
"def assert_models_equal(first, second):\n # layer names and settings\n assert first.get_config() == second.get_config()\n # model weights\n assert len(first.get_weights()) == len(second.get_weights())\n for w1, w2 in zip(first.get_weights(), second.get_weights()):\n np.testing.assert_array_equal(w1, w2)\n # optimizer\n assert first.optimizer.get_config() == second.optimizer.get_config()",
"def _compare_models(self, alpha1, alpha2):\n return np.array_equal(alpha1, alpha2)",
"def test_different_seeds_result_in_different_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=1)\n\n assert p.hash(fit1) != p.hash(fit2)\n assert fit1.artifact.id != fit2.artifact.id\n assert fit1.artifact.value_id != fit2.artifact.value_id",
"def compare_models(model1,model2):\n\n # initialisation:\n n_radial = 0\n n_radial_numax = 0\n n_non_radial = 0\n n_non_radial_numax = 0\n result = np.zeros((6+nglb,),dtype=gtype)\n # define frequency interval around numax:\n numax = 0.5*(model1.numax/model1.glb[ifreq_ref] \\\n + model2.numax/model2.glb[ifreq_ref])\n a = 0.8*numax\n b = 1.2*numax\n\n # compare frequency spectra:\n size1 = len(model1.modes)\n size2 = len(model2.modes)\n i1 = i2 = 0\n while((i1 < size1) and (i2 < size2)):\n if (model1.modes['l'][i1] < model2.modes['l'][i2]): i1+=1; continue\n if (model1.modes['l'][i1] > model2.modes['l'][i2]): i2+=1; continue\n if (model1.modes['n'][i1] < model2.modes['n'][i2]): i1+=1; continue\n if (model1.modes['n'][i1] > model2.modes['n'][i2]): i2+=1; continue\n\n # now the two modes have the same n and l values:\n diff = abs(model1.modes['freq'][i1] - model2.modes['freq'][i2])\n avg_freq =(model1.modes['freq'][i1] + model2.modes['freq'][i2])/2.0\n if (model1.modes['l'][i1] == 0):\n if (result[0] < diff): result[0] = diff\n diff *= diff # square diff\n result[1] += diff\n n_radial += 1\n # in python, this is called an interval comparison:\n if (a <= avg_freq <= b):\n result[2] += diff\n n_radial_numax += 1\n else:\n if (result[3] < diff): result[3] = diff\n diff *= diff # square diff\n result[4] += diff\n n_non_radial += 1\n if (a <= avg_freq <= b):\n result[5] += diff\n n_non_radial_numax += 1\n i1+=1\n i2+=1\n\n # avoid divisions by zero:\n if (n_radial > 0):\n result[1] = math.sqrt(result[1]/float(n_radial))\n else:\n result[1] = np.nan\n\n if (n_radial_numax > 0):\n result[2] = math.sqrt(result[2]/float(n_radial_numax))\n else:\n result[2] = np.nan\n\n if (n_non_radial > 0):\n result[4] = math.sqrt(result[4]/float(n_non_radial))\n else:\n result[4] = np.nan\n\n if (n_non_radial_numax > 0):\n result[5] = math.sqrt(result[5]/float(n_non_radial_numax))\n else:\n result[5] = np.nan\n\n # absolute differences on global parameters:\n result[6:6+nglb] = np.absolute(model1.glb - model2.glb)\n\n return result",
"def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)",
"def test_equal_models_opt():\n dmd = DMD(svd_rank=2, opt=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd, rtol=0.05)",
"def test_equal_models_exact():\n dmd = DMD(svd_rank=2, exact=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b",
"def test_asymmetric_noise_signal(self):\n np.random.seed(0)\n test_ts = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts1 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts2 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n\n noise = (np.random.rand(100 * 24) - 0.5) * (np.random.rand(100 * 24) > 2 / 3)\n noise *= noise > 0\n\n # add strictly positive noise to ts1 and strictly negative noise to ts2\n ts1.value += abs(ts1.value * noise)\n ts2.value -= abs(ts2.value * noise)\n\n ts1.value[93 * 24] += 20\n ts1.value[96 * 24] -= 20\n ts2.value[93 * 24] += 20\n ts2.value[96 * 24] -= 20\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response1 = model.fit_predict(test_ts[90 * 24 :], ts1[: 90 * 24])\n response2 = model.fit_predict(test_ts[90 * 24 :], ts2[: 90 * 24])\n\n self.assertGreater(\n response2.scores.value[3 * 24], response1.scores.value[3 * 24]\n )\n self.assertGreater(\n response2.scores.value[6 * 24], response1.scores.value[6 * 24]\n )",
"def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass",
"def test_same_models_are_equal(dbdiskrepo):\n fit1 = fit_model()\n fit2 = fit_model()\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id\n assert hash(fit1) == hash(fit2)",
"def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))",
"def test_equal_models_opt_exact():\n dmd = DMD(svd_rank=2, opt=True, exact=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def test_equal_models_default():\n dmd = DMD(svd_rank=2)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def test_same_seeds_result_in_same_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=0)\n\n assert p.hash(fit1) == p.hash(fit2)\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id",
"def test_equal_basic(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::eq\"},\n )",
"def assert_predictions_equal(first, second, x):\n preds1 = first.predict(x, batch_size=batch_size)\n preds2 = second.predict(x, batch_size=batch_size)\n np.testing.assert_array_equal(preds1, preds2)",
"def compare_models(model_1: torch.nn.Module, model_2: torch.nn.Module, is_equal: bool = True):\n with contextlib.nullcontext() if is_equal else pytest.raises(Exception):\n # Compare model module attributes since algorithms like StochasticDepth monkeypatch\n # on new attributes. We only check this on ComposerClassifier models that have .module\n if isinstance(model_1, ComposerClassifier) and isinstance(model_2, ComposerClassifier):\n model_1_modules = list(model_1.module.modules())\n model_2_modules = list(model_2.module.modules())\n assert len(model_1_modules) == len(model_2_modules)\n for module_1, module_2 in zip(model_1_modules, model_2_modules):\n assert sorted(list(module_1.__dict__.keys())) == sorted(list(module_2.__dict__.keys()))\n # Compare model parameters\n for (name0, tensor0), (name1, tensor1) in zip(model_1.state_dict().items(), model_2.state_dict().items()):\n assert name0 == name1\n assert torch.equal(tensor0, tensor1)",
"def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_same_weights(): # pylint : disable=too-many-locals\n # make models to test\n model1, X, _, _, _ = make_small_model(num_hidden_layers=1)\n model2, _, _, _, _ = make_small_model(num_hidden_layers=2)\n input_data = Input(shape=X[0].shape)\n initializer = tf.keras.initializers.Zeros()\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n model1a = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n\n # same\n same1, _ = safekeras.same_weights(model1, model1)\n assert same1 is True\n\n # different num layers\n same2, _ = safekeras.same_weights(model1, model2)\n assert same2 is False\n\n # different sized layers\n same3, _ = safekeras.same_weights(model1, model1a)\n errstr = (\n \"model1 hidden layer has \"\n f\" {len(model1.layers[1].get_weights()[0][0])} units\"\n f\" but model2 has {len(model1a.layers[1].get_weights()[0][0])}.\\n\"\n )\n assert same3 is False, errstr",
"def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)",
"def test_multiple_rng(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.random_state_variables is not rng2.random_state_variables",
"def test_model_equality(self):\r\n class EqualityModel0(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n class EqualityModel1(Model):\r\n kk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel0(pk=0)\r\n m1 = EqualityModel1(kk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)",
"def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, detection_comp.FeatureDetectionComparison) or\n not isinstance(benchmark2, detection_comp.FeatureDetectionComparison)):\n self.fail('object was not a FeatureDetectionComparison')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1._acceptable_radius, benchmark2._acceptable_radius)",
"def test_model_equality(self):\n class EqualityModel0(Model):\n pk = columns.Integer(primary_key=True)\n\n class EqualityModel1(Model):\n kk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel0(pk=0)\n m1 = EqualityModel1(kk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)",
"def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)",
"def compareAB(model1_name, model2_name, X_test_B, X_test_S, analysis_dir=\"Analysis/\"):\n #Load best weights\n model = tf.keras.models.load_model(\"Models/\"+model1_name)\n bkg_preds1 = model.predict(X_test_B).flatten()\n sig_preds1 = model.predict(X_test_S).flatten()\n\n model = tf.keras.models.load_model(\"Models/\"+model2_name)\n bkg_preds2 = model.predict(X_test_B).flatten()\n sig_preds2 = model.predict(X_test_S).flatten()\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds1 > thresh)/len(bkg_preds1)\n sig_eff_temp = np.sum(sig_preds1 > thresh)/len(sig_preds1)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model1_name + ' Background rejection @0.5 Signal efficiency = {:.2e}'.format(bkg_eff_50), xy=(0.05, 0.95), xycoords='axes fraction')\n print(sig_eff_50)\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds2 > thresh)/len(bkg_preds2)\n sig_eff_temp = np.sum(sig_preds2 > thresh)/len(sig_preds2)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model2_name + ' Background rejection @0.5 Signal efficiency = {:.3e}'.format(bkg_eff_50), xy=(0.05, 0.88), xycoords='axes fraction')\n print(sig_eff_50)\n\n plt.legend([model1_name, model2_name])\n plt.xlabel(\"Signal efficiency\")\n plt.ylabel(\"Background rejection\")\n plt.gcf().set_size_inches(8.3, 5.85)\n plt.savefig(analysis_dir+\"ROC\" + model1_name + \"VS\" + model2_name + \".pdf\", format=\"pdf\")\n plt.show()",
"def test_equals_with_different_sources(self):\n measurement_1 = Measurement(self.metric(), sources=[{\"source_uuid\": SOURCE_ID}])\n measurement_2 = Measurement(self.metric())\n self.assertFalse(measurement_1.equals(measurement_2))"
]
| [
"0.7456823",
"0.706848",
"0.68467826",
"0.6754829",
"0.6733205",
"0.6702121",
"0.66993004",
"0.6589811",
"0.656786",
"0.6565996",
"0.6552568",
"0.6550797",
"0.6517824",
"0.65070695",
"0.64580256",
"0.64347464",
"0.6351226",
"0.63377047",
"0.6276327",
"0.624758",
"0.6246909",
"0.62323713",
"0.6189924",
"0.6150565",
"0.61242664",
"0.611674",
"0.6113169",
"0.61022025",
"0.6062837",
"0.60533345"
]
| 0.80302626 | 0 |
Test two noise models are not equal | def test_noise_models_not_equal(self):
error = pauli_error([['X', 1]])
model1 = NoiseModel()
model1.add_all_qubit_quantum_error(error, ['u3'], False)
model2 = NoiseModel(basis_gates=['u3', 'cx'])
        model2.add_all_qubit_quantum_error(error, ['u3'], False)
        # The models differ only in their basis gates, so they must not compare equal
        self.assertNotEqual(model1, model2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_noise_models_equal(self):\n roerror = [[0.9, 0.1], [0.5, 0.5]]\n error1 = pauli_error([['X', 1]], standard_gates=False)\n error2 = pauli_error([['X', 1]], standard_gates=True)\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error1, ['u3'], False)\n model1.add_quantum_error(error1, ['u3'], [2], False)\n model1.add_nonlocal_quantum_error(error1, ['cx'], [0, 1], [3], False)\n model1.add_all_qubit_readout_error(roerror, False)\n model1.add_readout_error(roerror, [0], False)\n\n model2 = NoiseModel()\n model2.add_all_qubit_quantum_error(error2, ['u3'], False)\n model2.add_quantum_error(error2, ['u3'], [2], False)\n model2.add_nonlocal_quantum_error(error2, ['cx'], [0, 1], [3], False)\n model2.add_all_qubit_readout_error(roerror, False)\n model2.add_readout_error(roerror, [0], False)\n self.assertEqual(model1, model2)",
"def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )",
"def test_asymmetric_noise_signal(self):\n np.random.seed(0)\n test_ts = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts1 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts2 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n\n noise = (np.random.rand(100 * 24) - 0.5) * (np.random.rand(100 * 24) > 2 / 3)\n noise *= noise > 0\n\n # add strictly positive noise to ts1 and strictly negative noise to ts2\n ts1.value += abs(ts1.value * noise)\n ts2.value -= abs(ts2.value * noise)\n\n ts1.value[93 * 24] += 20\n ts1.value[96 * 24] -= 20\n ts2.value[93 * 24] += 20\n ts2.value[96 * 24] -= 20\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response1 = model.fit_predict(test_ts[90 * 24 :], ts1[: 90 * 24])\n response2 = model.fit_predict(test_ts[90 * 24 :], ts2[: 90 * 24])\n\n self.assertGreater(\n response2.scores.value[3 * 24], response1.scores.value[3 * 24]\n )\n self.assertGreater(\n response2.scores.value[6 * 24], response1.scores.value[6 * 24]\n )",
"def assert_models_equal(first, second):\n # layer names and settings\n assert first.get_config() == second.get_config()\n # model weights\n assert len(first.get_weights()) == len(second.get_weights())\n for w1, w2 in zip(first.get_weights(), second.get_weights()):\n np.testing.assert_array_equal(w1, w2)\n # optimizer\n assert first.optimizer.get_config() == second.optimizer.get_config()",
"def test_different_seeds_result_in_different_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=1)\n\n assert p.hash(fit1) != p.hash(fit2)\n assert fit1.artifact.id != fit2.artifact.id\n assert fit1.artifact.value_id != fit2.artifact.value_id",
"def test_equal_models_exact():\n dmd = DMD(svd_rank=2, exact=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)",
"def test_equal_models_opt():\n dmd = DMD(svd_rank=2, opt=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd, rtol=0.05)",
"def test_equal_models_opt_exact():\n dmd = DMD(svd_rank=2, opt=True, exact=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def test_equal_models_default():\n dmd = DMD(svd_rank=2)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)",
"def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))",
"def test_w_and_without():\n A = Node(\"A\", [\"B\"], {\"B\": np.array([[1,0],[1,.1]])})\n B = Node(\"B\", [], {})\n net = CyberNet([A,B])\n T=10\n data = gen_data(T, net, {\"A\": \"normal\", \"B\":\"normal\"})\n logn_fact = gen_logn_fact(data)\n pdata_no_a = prob_model_no_attacker(net, data, T, logn_fact)\n pdata_a = prob_model_given_data_times(net, data, {}, T, logn_fact,\n {\"A\": \"normal\",\n \"B\":\"normal\"})\n np.testing.assert_almost_equal(pdata_no_a, pdata_a)\n\n np.testing.assert_almost_equal(np.log(poisson.pmf(len(data[0]), 10)), pdata_a)",
"def _compare_models(self, alpha1, alpha2):\n return np.array_equal(alpha1, alpha2)",
"def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)",
"def compare_models(model1,model2):\n\n # initialisation:\n n_radial = 0\n n_radial_numax = 0\n n_non_radial = 0\n n_non_radial_numax = 0\n result = np.zeros((6+nglb,),dtype=gtype)\n # define frequency interval around numax:\n numax = 0.5*(model1.numax/model1.glb[ifreq_ref] \\\n + model2.numax/model2.glb[ifreq_ref])\n a = 0.8*numax\n b = 1.2*numax\n\n # compare frequency spectra:\n size1 = len(model1.modes)\n size2 = len(model2.modes)\n i1 = i2 = 0\n while((i1 < size1) and (i2 < size2)):\n if (model1.modes['l'][i1] < model2.modes['l'][i2]): i1+=1; continue\n if (model1.modes['l'][i1] > model2.modes['l'][i2]): i2+=1; continue\n if (model1.modes['n'][i1] < model2.modes['n'][i2]): i1+=1; continue\n if (model1.modes['n'][i1] > model2.modes['n'][i2]): i2+=1; continue\n\n # now the two modes have the same n and l values:\n diff = abs(model1.modes['freq'][i1] - model2.modes['freq'][i2])\n avg_freq =(model1.modes['freq'][i1] + model2.modes['freq'][i2])/2.0\n if (model1.modes['l'][i1] == 0):\n if (result[0] < diff): result[0] = diff\n diff *= diff # square diff\n result[1] += diff\n n_radial += 1\n # in python, this is called an interval comparison:\n if (a <= avg_freq <= b):\n result[2] += diff\n n_radial_numax += 1\n else:\n if (result[3] < diff): result[3] = diff\n diff *= diff # square diff\n result[4] += diff\n n_non_radial += 1\n if (a <= avg_freq <= b):\n result[5] += diff\n n_non_radial_numax += 1\n i1+=1\n i2+=1\n\n # avoid divisions by zero:\n if (n_radial > 0):\n result[1] = math.sqrt(result[1]/float(n_radial))\n else:\n result[1] = np.nan\n\n if (n_radial_numax > 0):\n result[2] = math.sqrt(result[2]/float(n_radial_numax))\n else:\n result[2] = np.nan\n\n if (n_non_radial > 0):\n result[4] = math.sqrt(result[4]/float(n_non_radial))\n else:\n result[4] = np.nan\n\n if (n_non_radial_numax > 0):\n result[5] = math.sqrt(result[5]/float(n_non_radial_numax))\n else:\n result[5] = np.nan\n\n # absolute differences on global parameters:\n result[6:6+nglb] = np.absolute(model1.glb - model2.glb)\n\n return result",
"def test_no_jitter(self):\n n = 10\n t_max = 25\n dt = 0.1\n G = RateHVCLayer(n)\n G.burst_noise = 0.0\n\n M1 = simulation.StateMonitor(G, 'out')\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.StateMonitor(G, 'out')\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertTrue(np.allclose(M1.out, M2.out))",
"def test_not_equal_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::ne\"},\n )",
"def test_decaydata___ne__(self):\n\n data1 = decaydata.DecayData(\"icrp107\")\n data2 = decaydata.DecayData(\"icrp107\")\n data2.dataset = \"icrp07\"\n self.assertNotEqual(data1, data2)",
"def test_multiple_rng(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.random_state_variables is not rng2.random_state_variables",
"def test_modeling__mapped_noise_from_model_works(y_data, train_Yvar):\n\n # define temporary class\n class Tmp:\n def __init__(self, y_data, train_Yvar):\n self.y_data = y_data\n self.train_Yvar = train_Yvar\n self.train_Y = torch.from_numpy(y_data.values)\n\n self.dtype = torch.double\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n from greattunes._modeling import _mapped_noise_from_model\n\n cls = Tmp(y_data=y_data, train_Yvar=train_Yvar)\n\n # run the method\n train_Yvar_mapped = cls._mapped_noise_from_model()\n\n # assert size\n assert y_data.shape[0] == train_Yvar_mapped.size()[0]\n\n # assert output type\n assert isinstance(train_Yvar_mapped, torch.DoubleTensor)\n\n # special case for cases which are not functions, to assert level of noise\n if not isinstance(cls.train_Yvar, types.FunctionType):\n if isinstance(cls.train_Yvar, torch.DoubleTensor):\n if len(list(cls.train_Yvar.size())) == 0:\n assert train_Yvar_mapped[0,0].item() == train_Yvar.item()\n elif len(list(cls.train_Yvar.size())) == 1:\n assert train_Yvar_mapped[0,0].item() == train_Yvar[0].item()\n elif isinstance(cls.train_Yvar, float) or isinstance(cls.train_Yvar, int):\n assert train_Yvar_mapped[0,0].item() == float(train_Yvar)",
"def test_mc_t_two_sample_no_mc(self):\r\n x = array([1, 1, 1])\r\n y = array([0, 0, 0])\r\n self.assertEqual(mc_t_two_sample(x, x), (nan, nan, [], nan))",
"def test_none(self):\n esnA = ESN(N_in,N_out,random_state=None)\n esnB = ESN(N_in,N_out,random_state=None)\n self._compare(esnA,esnB,should_be=\"different\")",
"def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass",
"def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b",
"def test_not_eq(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"water\", T=Q_(300.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert not st_1 == st_2",
"def test_same_weights(): # pylint : disable=too-many-locals\n # make models to test\n model1, X, _, _, _ = make_small_model(num_hidden_layers=1)\n model2, _, _, _, _ = make_small_model(num_hidden_layers=2)\n input_data = Input(shape=X[0].shape)\n initializer = tf.keras.initializers.Zeros()\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n model1a = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n\n # same\n same1, _ = safekeras.same_weights(model1, model1)\n assert same1 is True\n\n # different num layers\n same2, _ = safekeras.same_weights(model1, model2)\n assert same2 is False\n\n # different sized layers\n same3, _ = safekeras.same_weights(model1, model1a)\n errstr = (\n \"model1 hidden layer has \"\n f\" {len(model1.layers[1].get_weights()[0][0])} units\"\n f\" but model2 has {len(model1a.layers[1].get_weights()[0][0])}.\\n\"\n )\n assert same3 is False, errstr",
"def test_modeling__mapped_noise_from_model_fails(y_data, train_Yvar):\n\n # define temporary class\n class Tmp:\n def __init__(self, y_data, train_Yvar):\n self.y_data = y_data\n self.train_Yvar = train_Yvar\n self.train_Y = torch.from_numpy(y_data.values)\n\n self.dtype = torch.double\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.model = {\"model_type\": \"SingleTaskGP\"}\n\n from greattunes._modeling import _mapped_noise_from_model\n\n cls = Tmp(y_data=y_data, train_Yvar=train_Yvar)\n\n # run the method\n if isinstance(cls.train_Yvar, torch.DoubleTensor): # special case if tensor is provided\n with pytest.raises(Exception) as e:\n train_Yvar_mapped = cls._mapped_noise_from_model()\n assert str(e.value) == \"greattunes.greattunes._observe._mapped_noise_from_model: tensor provided for 'train_Yvar' has unacceptable dimensions. Only tensors of dimension 0 or 1 are accepted, provided tensor has dimension \" + str(len(list(train_Yvar.size()))) + \".\"\n else:\n with pytest.raises(Exception) as e:\n train_Yvar_mapped = cls._mapped_noise_from_model()\n assert str(e.value) == \"greattunes.greattunes._observe._mapped_noise_from_model: provided object for 'train_Yvar' is not acceptable. It must be either (i) a tensor of dimension 0 or 1, (ii) a float or int, or (iii) a function which operates on self.y_data. Provided object is of type \" + str(type(train_Yvar)) + \".\"",
"def test_ne():\n # Define some universal gsps\n gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3)\n\n # Pixel. Params include scale, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Pixel(scale=1.0),\n galsim.Pixel(scale=1.1),\n galsim.Pixel(scale=1.0, flux=1.1),\n galsim.Pixel(scale=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # Box. Params include width, height, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Box(width=1.0, height=1.0),\n galsim.Box(width=1.1, height=1.0),\n galsim.Box(width=1.0, height=1.1),\n galsim.Box(width=1.0, height=1.0, flux=1.1),\n galsim.Box(width=1.0, height=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # TopHat. Params include radius, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.TopHat(radius=1.0),\n galsim.TopHat(radius=1.1),\n galsim.TopHat(radius=1.0, flux=1.1),\n galsim.TopHat(radius=1.0, gsparams=gsp)]\n all_obj_diff(gals)",
"def testWithoutNoise(self):\n self.checkMatching(self.references)",
"def test_and_dlrne_fails_on_same_dl():\n secrets, secret_values, secret_dict = get_secrets(4)\n generators = make_generators(4)\n\n lhs_values = [x * g for x, g in zip(secret_values, generators)]\n y3 = secret_values[1] * generators[3]\n p1 = DLNotEqual(\n [lhs_values[0], generators[0]],\n [lhs_values[1], generators[1]],\n secrets[0],\n bind=True,\n )\n p2 = DLNotEqual([lhs_values[1], generators[1]], [y3, generators[3]], secrets[1])\n\n andp = p1 & p2\n p1_prime = DLNotEqual(\n [lhs_values[0], generators[0]],\n [lhs_values[1], generators[1]],\n secrets[0],\n bind=True,\n )\n\n p2_prime = DLNotEqual(\n [lhs_values[1], generators[1]], [y3, generators[3]], secrets[1]\n )\n\n andp_prime = p1_prime & p2_prime\n protocol = SigmaProtocol(andp_prime.get_verifier(), andp.get_prover(secret_dict))\n with pytest.raises(ValidationError):\n protocol.verify()"
]
| [
"0.820037",
"0.69289684",
"0.6738332",
"0.66467637",
"0.6599596",
"0.65457606",
"0.65447915",
"0.6543903",
"0.6452476",
"0.64383215",
"0.64159083",
"0.638408",
"0.6375067",
"0.6360007",
"0.63317573",
"0.62647396",
"0.626305",
"0.62161314",
"0.6211834",
"0.6180281",
"0.615478",
"0.61192214",
"0.6109684",
"0.61011535",
"0.6097588",
"0.60975254",
"0.60831124",
"0.60677516",
"0.60663706",
"0.60444707"
]
| 0.8125664 | 1 |
Creates a one-hot vector of n classes with the "index" class set to 1 | def oneHot(index, n):
x = np.zeros(n)
x[index] = 1
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def onehot(index):\n classNum=2#1\n onehot = np.zeros(classNum)#这代表种类类型\n onehot[index] = 1.0\n return onehot",
"def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]",
"def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]",
"def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]",
"def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]",
"def indices_to_one_hot(cls_indeces, nb_classes):\n targets = np.array(cls_indeces).reshape(-1)\n return np.eye(nb_classes)[targets]",
"def one_hot(index):\n\toutput = np.zeros(100)\n\toutput[index] = 1\n\treturn output",
"def indices_to_one_hot(data, nb_classes):\n\ttargets = np.array(data).reshape(-1)\n\treturn np.eye(nb_classes)[targets]",
"def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]",
"def as_one_hot(ind, n):\n vec = np.zeros(n)\n vec[ind] = 1\n return vec",
"def to_one_hot(class_indices, num_classes):\n one_hot_vectors = np.zeros((len(class_indices), num_classes))\n for vector_arg, class_args in enumerate(class_indices):\n one_hot_vectors[vector_arg, class_args] = 1.0\n return one_hot_vectors",
"def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]",
"def one_hot(index, dims, dtype=np.uint8):\n\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n for i in range(seq_len):\n ret[i][index[i]]=1\n\n return ret",
"def to_onehot(vec, num_classes, fill=1000):\n onehot_result = vec.new(vec.size(0), num_classes).float().fill_(-fill)\n arange_inds = vec.new(vec.size(0)).long()\n torch.arange(0, vec.size(0), out=arange_inds)\n onehot_result.view(-1)[vec + num_classes * arange_inds] = fill\n return onehot_result",
"def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)",
"def onehot(t, num_classes):\n assert isinstance(t, torch.LongTensor)\n return torch.zeros(t.size()[0], num_classes).scatter_(1, t.view(-1, 1), 1)",
"def one_hot(y, num_classes):\n return np.eye(num_classes)[y]",
"def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)",
"def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)",
"def indices_one_hot(labels_indices, num_classes=10):\n\n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n\n return labels_one_hot",
"def convert_to_onehot(vector, num_classes=None):\n assert isinstance(vector, np.ndarray)\n assert len(vector) > 0\n\n if num_classes is None:\n num_classes = np.max(vector)+1\n else:\n assert num_classes > 0\n assert num_classes >= np.max(vector)\n\n result = np.zeros(shape=(len(vector), num_classes))\n result[np.arange(len(vector)), vector] = 1\n return result.astype(int)",
"def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot",
"def one_hot(class_ids, num_classes):\n oh = np.zeros((len(class_ids), num_classes), dtype=np.float32)\n oh[np.arange(len(class_ids)), class_ids] = 1\n\n assert (oh.argmax(axis=1) == class_ids).all()\n assert (oh.sum(axis=1) == 1).all()\n\n return oh",
"def to_one_hot(v):\n n = len(v)\n m = max(v) + 1\n out = np.zeros((n, m))\n out[np.arange(n), v] = 1\n return out",
"def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]",
"def indices_one_hot(labels_indices, num_classes=10):\n \n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n \n return labels_one_hot",
"def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot",
"def one_hot(index, dims, dtype=np.uint8):\n if isinstance(index, int):\n ret = np.zeros((dims,), dtype)\n ret[index] = 1\n elif isinstance(index, (list, tuple)):\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n ret[range(seq_len), index] = 1.0\n else:\n raise ValueError('index should be int or list(tuple) of int.')\n return ret",
"def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()",
"def _get_one_hot(targets, num_classes):\n ret = np.zeros((num_classes, targets.shape[0]))\n ret[targets, np.arange(targets.size)] = 1\n return ret"
]
| [
"0.8317261",
"0.81406474",
"0.81406474",
"0.81406474",
"0.81406474",
"0.8102245",
"0.80313605",
"0.80136204",
"0.8009317",
"0.79900396",
"0.7881947",
"0.77704865",
"0.7754152",
"0.7703118",
"0.765413",
"0.7646331",
"0.7600033",
"0.74995023",
"0.74995023",
"0.7410096",
"0.74079174",
"0.7407273",
"0.74039304",
"0.74014646",
"0.7398072",
"0.7395562",
"0.73928446",
"0.7390549",
"0.7371307",
"0.73492557"
]
| 0.83001465 | 1 |
Check if mode contains "check" | def isMode(mode, check):
if mode=="default" or mode=="all":
return True
if mode.__contains__(check):
return True
if check.__contains__("_"):
check_modes = check.split("_")
for check_mode in check_modes:
if not isMode(mode, check_mode):
return False
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_enable_mode(self, check_string='#'):\n return True",
"def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def check():",
"def check_config_mode(self, check_string=\")#\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string)",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def is_check_mode_enabled(self):\n return self.in_check_mode",
"def check_config_mode(\n self, check_string: str = \")#\", pattern: str = \"\", force_regex: bool = False\n ) -> bool:\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def in_test_mode(mode: str) -> bool:\n return mode == TEST",
"def check(log=False):\n return True",
"def check( log = False):\n return True",
"def game_check(self, mode, row=None, col=None, is_set=True,\r\n show_fail=True):\r\n self.add_play_move(PlayMove.GAME_CHECK, mode=mode, row=row, is_set=is_set, show_fail=show_fail)\r\n if mode == \"h\" or mode == \"v\":\r\n part = self.get_part(type=\"edge\", sub_type=mode, row=row, col=col)\r\n if part is None:\r\n raise SelectError(f\"game_check: no edge({mode}) found at row={row} col={col}\") \r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for line({mode}) at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n elif mode == \"sq\":\r\n part = self.get_part(type=\"region\", row=row, col=col)\r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for square at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n else:\r\n raise SelectFail(f\"Unrecognized game_check mode({mode}\")\r\n \r\n return True",
"def check_config_mode(self):\n return False",
"def check_for_bool(check):",
"def check(self, description: Description) -> bool:",
"def check(self):\n return True",
"def check_config_mode(self, check_string=\"(config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def in_easy_mode(mode: str) -> bool:\n return mode == EASY",
"def check_config_mode(self, check_string=\">config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()",
"def _checkModeChange(self, expected, target=None):\n result = self._parseModeChange(self.client.calls, target)\n self.assertEqual(result, expected)\n self.client.calls = []",
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"async def _check_multiple_mode(self):\n logger.info(\"Host {}:Checking multiple mode\".format(self._host))\n out = await self.send_command('show mode')\n if 'multiple' in out:\n self._multiple_mode = True\n\n logger.debug(\"Host {}: Multiple mode: {}\".format(self._host, self._multiple_mode))",
"def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False",
"def check(s):\n s.checkState()",
"def verifyOpsForMode(teal: List[TealComponent], mode: Mode):\n for stmt in teal:\n if isinstance(stmt, TealOp):\n op = stmt.getOp()\n if not op.mode & mode:\n raise TealInputError(\n \"Op not supported in {} mode: {}\".format(mode.name, op)\n )",
"def check_flag(self):\n return self._flag is 0 or self._flag is 16",
"def checks_check(self, turn):\n opposite_colour = next_turn(turn)\n\n if piece_class.KING_LOCATION[opposite_colour] in self.path_dict[turn]:\n self.print_message(\"CHECK!\")\n# self.checkmate(turn)\n self.mate_double(turn)\n self.mate_pinned(turn)\n self.mate_normal(turn)\n\n if piece_class.KING_LOCATION[turn] in self.path_dict[opposite_colour]:\n return True\n\n else:\n return False",
"def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)",
"def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False"
]
| [
"0.70960957",
"0.6581383",
"0.64866024",
"0.64746034",
"0.6376049",
"0.63402003",
"0.62630945",
"0.6178688",
"0.61621815",
"0.614451",
"0.61147934",
"0.6092406",
"0.6087381",
"0.60813844",
"0.6067212",
"0.60657835",
"0.6018074",
"0.5989378",
"0.59655833",
"0.5960526",
"0.5955485",
"0.5940441",
"0.5876395",
"0.5874341",
"0.5854142",
"0.5835945",
"0.58211374",
"0.58017087",
"0.57831234",
"0.5772918"
]
| 0.74882025 | 0 |
Coordinate embeddings of bounding boxes | def coordinate_embeddings(boxes, dim):
batch_size, num_boxes, num_loc = boxes.shape
# transform to (x_c, y_c, w, h) format
pos = boxes.new_zeros((batch_size, num_boxes, 4))
pos[:, :, 0] = (boxes[:, :, 0] + boxes[:, :, 2]) / 2 * 100
pos[:, :, 1] = (boxes[:, :, 1] + boxes[:, :, 3]) / 2 * 100
pos[:, :, 2] = (boxes[:, :, 2] - boxes[:, :, 0]) * 100
pos[:, :, 3] = (boxes[:, :, 3] - boxes[:, :, 1]) * 100
# sin/cos embedding
dim_mat = 1000 ** (torch.arange(dim, dtype=boxes.dtype, device=boxes.device) / float(dim))
sin_embedding = (pos.view((batch_size, num_boxes, 4, 1)) / dim_mat.view((1, 1, 1, -1))).sin()
cos_embedding = (pos.view((batch_size, num_boxes, 4, 1)) / dim_mat.view((1, 1, 1, -1))).cos()
return torch.cat((sin_embedding, cos_embedding), dim=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xywh_xyxy(boxes):\n bbox = np.zeros(boxes.shape)\n bbox[:, 0] = boxes[:, 0] \n bbox[:, 1] = boxes[:, 1] \n bbox[:, 2] = boxes[:, 0] + 1 * boxes[:, 2]\n bbox[:, 3] = boxes[:, 1] + 1 * boxes[:, 3]\n return bbox",
"def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)",
"def _to_image_coords(self, boxes, height, width):\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords",
"def _decode_boxes(raw_boxes, anchors):\n boxes = np.zeros(raw_boxes.shape, np.float32)\n SCALE = 128\n x_center = raw_boxes[..., 0] / SCALE * anchors[:, 2] + anchors[:, 0]\n y_center = raw_boxes[..., 1] / SCALE * anchors[:, 3] + anchors[:, 1]\n\n w = raw_boxes[..., 2] / SCALE * anchors[:, 2]\n h = raw_boxes[..., 3] / SCALE * anchors[:, 3]\n\n boxes[..., 0] = y_center - h / 2. # ymin\n boxes[..., 1] = x_center - w / 2. # xmin\n boxes[..., 2] = y_center + h / 2. # ymax\n boxes[..., 3] = x_center + w / 2. # xmax\n\n for k in range(6):\n offset = 4 + k * 2\n keypoint_x = raw_boxes[..., offset] / SCALE * anchors[:, 2] + anchors[:, 0]\n keypoint_y = raw_boxes[..., offset + 1] / SCALE * anchors[:, 3] + anchors[:, 1]\n boxes[..., offset] = keypoint_x\n boxes[..., offset + 1] = keypoint_y\n\n return boxes",
"def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))",
"def decode_boxes(raw_boxes, anchors):\n boxes = np.zeros_like(raw_boxes)\n\n x_center = raw_boxes[..., 0] / x_scale * anchors[:, 2] + anchors[:, 0]\n y_center = raw_boxes[..., 1] / y_scale * anchors[:, 3] + anchors[:, 1]\n\n w = raw_boxes[..., 2] / w_scale * anchors[:, 2]\n h = raw_boxes[..., 3] / h_scale * anchors[:, 3]\n\n boxes[..., 0] = y_center - h / 2. # ymin\n boxes[..., 1] = x_center - w / 2. # xmin\n boxes[..., 2] = y_center + h / 2. # ymax\n boxes[..., 3] = x_center + w / 2. # xmax\n\n for k in range(num_keypoints):\n offset = 4 + k*2\n keypoint_x = raw_boxes[..., offset] / x_scale * anchors[:, 2] + anchors[:, 0]\n keypoint_y = raw_boxes[..., offset + 1] / y_scale * anchors[:, 3] + anchors[:, 1]\n boxes[..., offset] = keypoint_x\n boxes[..., offset + 1] = keypoint_y\n\n return boxes",
"def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()",
"def bbox(self):\n return np.array(\n [[self.position[0], self.position[1]], [self.position[0], self.position[1]]]\n )",
"def to_image_coords(boxes, height, width):\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords",
"def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx = bbox[..., 0]\n cy = bbox[..., 1]\n w = bbox[..., 2]\n h = bbox[..., 3]\n out_box = np.stack(\n [cx-w/2, cy-h/2, cx+w/2, cy+h/2],\n axis=-1\n )\n return out_box",
"def get_box_coordinates(self):\n return self.box_coordinates",
"def get_embeddings_shape(self):\n raise NotImplementedError",
"def calc_bounding_box(self):\n self.BB = self.geos.abs_el(0).BB\n for geo in self.geos.abs_iter():\n self.BB = self.BB.joinBB(geo.BB)",
"def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)",
"def bbox(self):\n return [self._x0, self._y0, self._x1, self._y1]",
"def point_form(boxes):\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax",
"def decode_boxes(bbox, size):\n y0, x0, y1, x1 = tf.split(bbox, 4, axis=-1)\n h = tf.cast(size[0], tf.float32)\n w = tf.cast(size[1], tf.float32)\n\n y0 = tf.clip_by_value(y0 * h, 0.0, h)\n x0 = tf.clip_by_value(x0 * w, 0.0, w)\n y1 = tf.clip_by_value(y1 * h, 0.0, h)\n x1 = tf.clip_by_value(x1 * w, 0.0, w)\n\n bbox = tf.concat([x0, y0, x1, y1], axis=-1)\n return bbox",
"def point_form(boxes):\n return torch.cat((boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:] / 2), 1) # xmax, ymax",
"def _decode_boxes(self, parsed_tensors):\n xmin = parsed_tensors[\"image/object/bbox/xmin\"]\n xmax = parsed_tensors[\"image/object/bbox/xmax\"]\n ymin = parsed_tensors[\"image/object/bbox/ymin\"]\n ymax = parsed_tensors[\"image/object/bbox/ymax\"]\n return tf.stack([xmin, ymin, xmax, ymax], axis=-1)",
"def getLocation(bounding_box):\n ymin, xmin, ymax, xmax = bounding_box\n w=1280\n h=720\n left, right, top, bottom = (xmin * w, xmax * w,\n ymin * h, ymax * h)\n ###############################################\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(h, np.floor(bottom + 0.5).astype('int32'))\n right = min(w, np.floor(right + 0.5).astype('int32'))\n # print(label, (left, top), (right, bottom))\n \n return int((left + right) / 2.0), int((top + bottom) / 2.0)\n \n # xlt, ylt, xrb, yrb = bounding_box\n # return int((xlt + xrb) / 2.0), int((ylt + yrb) / 2.0)",
"def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx, cy, w, h = bbox\n out_box = [[]]*4\n out_box[0] = cx-w/2\n out_box[1] = cy-h/2\n out_box[2] = cx+w/2\n out_box[3] = cy+h/2\n\n return out_box",
"def center_point_to_coordinates(bbox):\n y, x, h, w = tf.split(bbox, 4, -1)\n h2, w2 = tf.cast(h / 2, h.dtype), tf.cast(w / 2, w.dtype)\n return tf.concat([y - h2, x - w2, y + h2, x + w2], -1)",
"def _compute_boundaries_embedding(self, boundaries):\n # Check if boundaries have been determined before\n boundaries_hash = hash(boundaries[self.n_keep_dims:].tostring())\n if boundaries_hash in self.boundaries_cache:\n boundaries_embedded = \\\n np.array(self.boundaries_cache[boundaries_hash])\n boundaries_embedded[:self.n_keep_dims] = \\\n boundaries[:self.n_keep_dims] # Overwrite keep-dim's boundaries\n return boundaries_embedded\n\n # Determine boundaries on embedded space\n boundaries_embedded = \\\n np.empty((self.n_keep_dims + self.d_embedding, 2))\n boundaries_embedded[:self.n_keep_dims] = boundaries[:self.n_keep_dims]\n for dim in range(self.n_keep_dims,\n self.n_keep_dims + self.d_embedding):\n x_embedded = np.zeros(self.n_keep_dims + self.d_embedding)\n while True:\n x = self._manifold_to_dataspace(x_embedded)\n if np.sum(np.logical_or(\n x[self.n_keep_dims:] < boundaries[self.n_keep_dims:, 0],\n x[self.n_keep_dims:] > boundaries[self.n_keep_dims:,\n 1])) \\\n > (self.d_orig - self.n_keep_dims) / 2:\n break\n x_embedded[dim] -= 0.01\n boundaries_embedded[dim, 0] = x_embedded[dim]\n\n x_embedded = np.zeros(self.n_keep_dims + self.d_embedding)\n while True:\n x = self._manifold_to_dataspace(x_embedded)\n if np.sum(np.logical_or(\n x[self.n_keep_dims:] < boundaries[self.n_keep_dims:, 0],\n x[self.n_keep_dims:] > boundaries[self.n_keep_dims:, 1])) \\\n > (self.d_orig - self.n_keep_dims) / 2:\n break\n x_embedded[dim] += 0.01\n boundaries_embedded[dim, 1] = x_embedded[dim]\n\n self.boundaries_cache[boundaries_hash] = boundaries_embedded\n\n return boundaries_embedded",
"def get_final_bounding_box(boxes, nms_idx, width: int, height: int):\n x1 = np.inf\n y1 = np.inf\n x2 = -np.inf\n y2 = -np.inf\n\n bx = [boxes[i] for i in nms_idx]\n for box in bx:\n xmin = np.min(box[[0, 2]])\n xmax = np.max(box[[0, 2]])\n ymin = np.min(box[[1, 3]])\n ymax = np.max(box[[1, 3]])\n\n x1 = np.min([xmin, x1])\n y1 = np.min([ymin, y1])\n x2 = np.max([xmax, x2])\n y2 = np.max([ymax, y2])\n return x1, y1, x2, y2",
"def get_map(img, vertices, labels, annotations, embeddings, scale, length, embedding_size):\n\n score_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros((int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n embedding_map = np.zeros((int(img.height * scale), int(img.width * scale), embedding_size), np.float32)\n\n index = np.arange(0, length, int(1 / scale))\n index_x, index_y = np.meshgrid(index, index)\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n if labels[i] == 0:\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) <= 0):\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) >= int(scale * img.height)):\n continue\n\n poly = np.around(scale * shrink_poly(vertice, coef=0.2).reshape((4, 2))).astype(np.int32) # scaled & shrink\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n min_x = int(min(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n max_x = int(max(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n min_y = int(min(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n max_y = int(max(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n embedding_map[min_y:max_y, min_x:max_x] = embeddings[annotations[i]]\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n cv2.fillPoly(score_map, polys, 1)\n\n return torch.Tensor(score_map).permute(2, 0, 1), torch.Tensor(geo_map).permute(2, 0, 1), \\\n torch.Tensor(ignored_map).permute(2, 0, 1), torch.Tensor(embedding_map).permute(2, 0, 1)",
"def edge_centers(self):\n x0, y0, width, height = self._rect_bbox\n w = width / 2.\n h = height / 2.\n xe = x0, x0 + w, x0 + width, x0 + w\n ye = y0 + h, y0, y0 + h, y0 + height\n transform = self._get_rotation_transform()\n coords = transform.transform(np.array([xe, ye]).T).T\n return coords[0], coords[1]",
"def _get_center_coordinates_and_sizes_vector(box_data):\n ymin, xmin, ymax, xmax = [np.squeeze(i) for i in np.split(box_data, 4, 0)]\n width = np.subtract(xmax, xmin)\n height = np.subtract(ymax, ymin)\n ycenter = np.add(ymin, np.multiply(height, 0.5))\n xcenter = np.add(xmin, np.multiply(width, 0.5))\n return ycenter, xcenter, height, width",
"def coordinates(self):",
"def set_embeddings(self):",
"def bbox_coordinates(label_sitk):\n\n #Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1)\n\n spacer = 3\n xmin = bbox_dims[0]-spacer\n xmax = bbox_dims[1]+spacer\n ymin = bbox_dims[2]-spacer\n ymax = bbox_dims[3]+spacer\n zmin = bbox_dims[4]-spacer\n zmax = bbox_dims[5]+spacer\n\n p1 = [xmin-spacer, ymin, zmin]\n p2 = [xmin, ymin, zmax]\n p3 = [xmin, ymax, zmin]\n p4 = [xmin, ymax, zmax]\n p5 = [xmax, ymin, zmin]\n p6 = [xmax, ymin, zmax]\n p7 = [xmax, ymax, zmin]\n p8 = [xmax, ymax, zmax]\n bbox_pts = [p1, p2, p3, p4, p5, p6, p7, p8]\n\n return bbox_pts"
]
| [
"0.6516033",
"0.6400761",
"0.6280361",
"0.61731803",
"0.61345005",
"0.6076974",
"0.60425735",
"0.60376346",
"0.6017529",
"0.5986557",
"0.59813845",
"0.59286344",
"0.5880047",
"0.5870507",
"0.58615243",
"0.5855573",
"0.5845112",
"0.58403444",
"0.581915",
"0.5816758",
"0.58124137",
"0.57809776",
"0.577644",
"0.57753503",
"0.5763588",
"0.57444555",
"0.57368684",
"0.5734177",
"0.57084495",
"0.57071817"
]
| 0.6860003 | 0 |
Set default image feature to an existing model. | def set_default_image_feature(self, image_feature):
target = self.dummy_input_imgs
# None means batch axis
x = image_feature.features.clone()[None]
assert x.shape == target.data.shape
target.data = x
target = self.dummy_image_loc
# None means batch axis
x = image_feature.image_location.clone()[None]
assert x.shape == target.data.shape
target.data = x
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_default_image(self, image):\n raise NotImplementedError",
"def _diffusion_imaging_model_changed(self, new):\n self.stages[\"Diffusion\"].config.diffusion_imaging_model = new",
"def select_default_picture(sender, instance, **kwargs):\n if not instance.id:\n instance.picture = \"/static/user%s.png\"%(\"F\" if instance.female else \"M\")",
"def set_default_model(self, model_id):\n try:\n self.default_model = self.model_dict[model_id]\n except KeyError:\n return False\n return False",
"def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')",
"def on_image_change(self, value):\n self.current_image.setImage( self._model.image )",
"async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")",
"def set_img(self, img):\n self.img = img",
"def load_image(default=True):\n if default:\n print(\"in heres\")\n return self.img\n else:\n img = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))\n self.size = img.shape\n return img",
"def setImage(self, image):\n raise NotImplementedError",
"def __init__(self):\n super(HybridImageModel, self).__init__()",
"def setImage(*args):",
"def modify_image(self, example, target_label):\n raise NotImplementedError()",
"def reassignImage(self, newImg):\n retVal = FeatureSet()\n for i in self:\n retVal.append(i.reassign(newImg))\n return retVal",
"def use_model_on_one_image(self, image_path, model_path, save_path):\n if self.cuda:\n self.unet.load_state_dict(torch.load(model_path))\n else:\n self.unet.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n\n ori_image = Image.open(image_path).convert('L')\n transform = ToTensor()\n\n input = transform(ori_image)\n if self.cuda:\n input = Variable(input.cuda())\n else:\n input = Variable(input)\n input = torch.squeeze(input,0)\n\n output = unet(input)\n\n if self.cuda:\n output = output.cuda()\n\n result = torch.cat([input.data, output.data], 0)\n\n torchvision.utils.save_image(result, save_path)",
"def setFlatImage(self, value=1.0):\n self.fimage = None\n self.image = numpy.zeros((self.ny, self.nx), 'float') + value\n return",
"def set_default_parameters(self):\n super().set_default_parameters()\n if not \"n_sub_images\" in vars(self):\n self.n_sub_images = -1 # do all-sub-images",
"def load_default_model(name):\n return DEFAULT_MODEL_DICT[name]",
"def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return",
"def initialize_default_model(config: BareConfig, model_class) -> torch.nn.Module:\n model = model_class()\n default_model_path = f\"{config.get_default_model_folder_path()}/{model_class.__name__}.model\"\n model.load_state_dict(torch.load(default_model_path))\n return model",
"def set_image(self, image):\n self.data['image'] = image",
"def __init__(self, feature_type={'model': 'vgg16', 'input_layer': 'default', 'output_layer': 'flatten'}):\n if feature_type['model'] == 'vgg16':\n self.feature_model = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None, pooling=None, classes=1000)\n if feature_type['model'] == 'custom':\n self.load_custom_model(os.getcwd())\n self.graph = tf.get_default_graph()\n self.load_intermediate_model(feature_type['output_layer'])",
"def setdefault(self, value: Any) -> None: # type: ignore\n self.default_factory = value \n return",
"def _reset_image(self):\n print(\"Reseting image\")\n print(self.original_cv_image)\n self.working_image = self.original_cv_image\n self.cv_image = self.working_image\n self.set_image()",
"def reset_img(self):\n self.img = np.zeros((self.l_i, self.l_i), dtype='float32')\n self.img_name = ''",
"def default(self, name, new=None, erase=False):\n # Check existence\n if name not in self._defaults:\n raise tools.UnavailableException(self._defaults, name, what=\"model default\")\n # Get current\n old = self._defaults[name]\n # Set if needed\n if erase or new is not None:\n self._defaults[name] = new\n # Return current/old\n return old",
"def set_defaults(self):\n for key, constraints in self.__class__.MODEL.items():\n if key not in self.resource:\n self.resource[key] = constraints[3]",
"def __init__(self, classifier, save_file=None, name='TFModel'):\n super(SKLearnNoiseAwareModel, self).__init__(name)\n self.classifier = classifier\n if not getattr(self.classifier, 'predict_proba'):\n self.classifier = CalibratedClassifierCV(base_estimator=self.classifier)\n # Load model\n if save_file is not None and os.path.isfile(save_file):\n self.load(save_file)",
"def _setup_new_image(self):\n\n if not self._viewer is None:\n if not self._input_image is None:\n self._viewer.SetInput(self._input_image)\n else:\n self._viewer.SetInput(self._dummy_image_source.GetOutput())\n\n ii = self._viewer.GetInput()\n \n ii.UpdateInformation()\n ii.Update()\n range = ii.GetScalarRange()\n self._viewer.SetColorWindow(range[1] - range[0])\n self._viewer.SetColorLevel(0.5 * (range[1] + range[0]))\n \n icp = self._view_frame._image_control_panel\n icp.slider.SetRange(self._viewer.GetSliceMin(),\n self._viewer.GetSliceMax())\n icp.slider.SetValue(self._viewer.GetSliceMin())\n \n #self._viewer.UpdateDisplayExtent()\n self._viewer.GetRenderer().ResetCamera()",
"def _set_default_attr(self, default_attr):\n for attr, val in six.iteritems(default_attr):\n if getattr(self, attr, None) is None:\n setattr(self, attr, val)"
]
| [
"0.6984379",
"0.60346025",
"0.58383137",
"0.5692587",
"0.5637139",
"0.5607432",
"0.5505951",
"0.5462009",
"0.54486644",
"0.54372",
"0.5430467",
"0.54257965",
"0.5415532",
"0.54085255",
"0.5398587",
"0.5368907",
"0.5345402",
"0.52843153",
"0.5280943",
"0.52764785",
"0.5224097",
"0.5223526",
"0.5210242",
"0.5205444",
"0.5193569",
"0.5161674",
"0.5145091",
"0.5135851",
"0.50955725",
"0.50774217"
]
| 0.7342939 | 0 |
initialize a bdb hash | def bdb_init_hash(db_file, cache_size=None):
db_dir = dirname(db_file)
if not isdir(db_dir):
makedirs(db_dir)
db = DB()
if cache_size is None:
cache_size = _cache_size
db.set_cachesize (
cache_size / (1024*1024*1024),
cache_size % (1024*1024*1024)
)
db.open(db_file, dbtype=DB_HASH, flags=DB_CREATE)
return db | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hfreq_bdb_init(db_file, cache_size=None):\n\treturn bdb_init_hash(db_file, cache_size)",
"def __init__(self):\n _snap.TStrHashF_DJB_swiginit(self, _snap.new_TStrHashF_DJB())",
"def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]",
"def __init__(self):\n self.size = 1000\n self.hash_table = [None] * self.size",
"def init_hash_state(self) -> None:\n self.hash_states = [hashlib.sha1()]",
"def __init__(self):\n self.hash = [[] for _ in range(20011)]",
"def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket",
"def __init__(self):\n\n self._dict = OrderedDict(zip(const.BFHCOLS, [0] * 111))",
"def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()",
"def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()",
"def __init__(self):\n \n self.hashset=[None]* 1000",
"def sha_init(self):\n pass",
"def __init__(self, seed=None):\n self.seed(seed)\n self.hashfun = \"SHA-256\"\n self._basehash()",
"def test_init_hash(self):\n bill = Bill(self.input_hash)\n for key, value in self.input_hash.iteritems():\n self.assertEqual(value, bill.__dict__[key])",
"def __init__(self, table_capacity=101, hash_base=31):\n\n self.table_capacity = table_capacity\n self.hash_base = hash_base\n self.array = [None] * self.table_capacity # Array initialized with 'None\" in each element.\n self.count = 0 # the number of items in the table, initially set to '0'\n self.rehash_count = 0\n self.probe_array = []",
"def __init__(self):\n self.size = 997\n self.hash_set = [None] * self.size",
"def _basehash(self):\n if self.baseseed is not None:\n hashinput = (str(self.baseseed) + ',').encode()\n self.basehash = hashlib.sha256(hashinput)\n else:\n self.basehash = None",
"def initialize_(self):\n super(NeighborhoodHash, self).initialize_()\n\n if not self.initialized_[\"random_seed\"]:\n seed(self.random_seed)\n self.initialized_[\"random_seed\"] = True\n\n if not self.initialized_[\"R\"]:\n if type(self.R) is not int or self.R <= 0:\n raise TypeError('R must be an intger bigger than zero')\n self.initialized_[\"R\"] = True\n\n if not self.initialized_[\"nh_type\"]:\n if self.nh_type == 'simple':\n self._noc_f = False\n self._NH = lambda G: self.neighborhood_hash_simple(G)\n elif self.nh_type == 'count_sensitive':\n self._noc_f = True\n self._NH = lambda G: self.neighborhood_hash_count_sensitive(G)\n else:\n raise TypeError('unrecognised neighborhood hashing type')\n self.initialized_[\"nh_type\"] = True\n\n if not self.initialized_[\"bits\"]:\n if type(self.bits) is not int or self.bits <= 0:\n raise TypeError('illegal number of bits for hashing')\n\n self._max_number = 1 << self.bits\n self._mask = self._max_number-1\n self.initialized_[\"bits\"] = True",
"def __init__(self):\n super(HashCheck, self).__init__()\n self.hash_log_curr = {}\n self.hash_curr_files = {}\n self.log_cut_off_date = self.get_timestamp(self.number_of_log_days)\n #holds hash codes for which duplicates are excitable\n self.valid = None",
"def __init__(self, d):\n\t\tself.d = d\n\t\tself.st = SeparateChainingHashST()",
"def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db",
"def __init__(self, filename=None, hashbits=20, depth=100, maxtime=16384):\n if filename is not None:\n self.load(filename)\n else:\n self.hashbits = hashbits\n self.depth = depth\n self.maxtimebits = _bitsfor(maxtime)\n # allocate the big table\n size = 2 ** hashbits\n self.table = np.zeros((size, depth), dtype=np.uint32)\n # keep track of number of entries in each list\n self.counts = np.zeros(size, dtype=np.int32)\n # map names to IDs\n self.names = []\n # track number of hashes stored per id\n self.hashesperid = np.zeros(0, np.uint32)\n # Empty params\n self.params = {}\n # Record the current version\n self.ht_version = HT_VERSION\n # Mark as unsaved\n self.dirty = True",
"def __init__(self, user_num, bsdb):\n self.user_num = user_num\n self.bsdb = bsdb",
"def __init__(self):\n self._root = None\n self._size = 0\n self._my_hash = {'preorder':self.preorder, 'postorder': self.postorder, 'inorder': self.inorder,\n \"breadthfirst\": self.breadthfirst}",
"def __init__(self, hash_str, salt):\n self.hash = hash_str\n self.salt = salt",
"def test_basic_failover_bad_hashlib_hash_init(self) -> None:\n assert _attempt_init_of_python_3_9_hash_object(None) is None",
"def __init__(self):\n super(LongObjectHashMap, self).__init__()\n initialize()",
"def __init__(self):\n self.size = 10000\n self.hashmap = [None] * self.size",
"def generichash_blake2b_init(\n key: bytes = b\"\",\n salt: bytes = b\"\",\n person: bytes = b\"\",\n digest_size: int = crypto_generichash_BYTES,\n) -> Blake2State:\n\n _checkparams(digest_size, key, salt, person)\n\n state = Blake2State(digest_size)\n\n # both _salt and _personal must be zero-padded to the correct length\n _salt = ffi.new(\"unsigned char []\", crypto_generichash_SALTBYTES)\n _person = ffi.new(\"unsigned char []\", crypto_generichash_PERSONALBYTES)\n\n ffi.memmove(_salt, salt, len(salt))\n ffi.memmove(_person, person, len(person))\n\n rc = lib.crypto_generichash_blake2b_init_salt_personal(\n state._statebuf, key, len(key), digest_size, _salt, _person\n )\n ensure(rc == 0, \"Unexpected failure\", raising=exc.RuntimeError)\n\n return state",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]"
]
| [
"0.7191982",
"0.67193675",
"0.67058957",
"0.6701905",
"0.66732764",
"0.65171236",
"0.6465162",
"0.6455151",
"0.64273334",
"0.63605845",
"0.63186646",
"0.6285688",
"0.6251298",
"0.62214595",
"0.62131435",
"0.6166384",
"0.6118945",
"0.6118871",
"0.60922813",
"0.6070766",
"0.6049538",
"0.60425323",
"0.604196",
"0.6040563",
"0.6031282",
"0.6031166",
"0.60132295",
"0.6002043",
"0.597738",
"0.59734386"
]
| 0.7485022 | 0 |
initialize (open) a bdb frequency hash | def hfreq_bdb_init(db_file, cache_size=None):
return bdb_init_hash(db_file, cache_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.freq = {}",
"def bdb_init_hash(db_file, cache_size=None):\n\tdb_dir = dirname(db_file)\n\tif not isdir(db_dir):\n\t\tmakedirs(db_dir)\n\tdb = DB()\n\tif cache_size is None:\n\t\tcache_size = _cache_size\n\tdb.set_cachesize (\n\t\tcache_size / (1024*1024*1024),\n\t\tcache_size % (1024*1024*1024)\n\t)\n\tdb.open(db_file, dbtype=DB_HASH, flags=DB_CREATE)\n\treturn db",
"def __init__(self):\n self.freq = collections.defaultdict(set)\n self.cache = collections.defaultdict()\n self.max_freq = 0\n self.min_freq = 0",
"def __init__(self, filename=None, hashbits=20, depth=100, maxtime=16384):\n if filename is not None:\n self.load(filename)\n else:\n self.hashbits = hashbits\n self.depth = depth\n self.maxtimebits = _bitsfor(maxtime)\n # allocate the big table\n size = 2 ** hashbits\n self.table = np.zeros((size, depth), dtype=np.uint32)\n # keep track of number of entries in each list\n self.counts = np.zeros(size, dtype=np.int32)\n # map names to IDs\n self.names = []\n # track number of hashes stored per id\n self.hashesperid = np.zeros(0, np.uint32)\n # Empty params\n self.params = {}\n # Record the current version\n self.ht_version = HT_VERSION\n # Mark as unsaved\n self.dirty = True",
"def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}",
"def __init__(self):\n self.size = 1000\n self.hash_table = [None] * self.size",
"def __init__(self):\n # better to be a prime number, less collision\n self.key_space = 2069\n self.hash_table = [Bucket() for i in range(self.key_space)]",
"def __init__(self, id: int, *, freq: int = 400000) -> None:\n ...",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]",
"def __init__(self):\n\n self._dict = OrderedDict(zip(const.BFHCOLS, [0] * 111))",
"def __init__(self):\n self.word_to_freq = {}\n self.head = Node()\n self.tail = Node()\n self.head.next = self.tail\n self.tail.prev = self.head\n self.freq_to_node = {0: self.head, sys.maxint: self.tail}",
"def __init__(self):\n self.num_counts = {}",
"def __init__(self, table_capacity=101, hash_base=31):\n\n self.table_capacity = table_capacity\n self.hash_base = hash_base\n self.array = [None] * self.table_capacity # Array initialized with 'None\" in each element.\n self.count = 0 # the number of items in the table, initially set to '0'\n self.rehash_count = 0\n self.probe_array = []",
"def __init__(self, db):\n\n # Add database object\n self.db = db\n\n # Initialize a dictionary to store maps of meg data (oscillation bands)\n self.meg_maps = dict()\n self.bands = dict()\n\n # Initialize a dictionary to store exponent map\n self.exponent_map = dict({'Exponents': np.array([])})\n\n # Initialize booleans that keep track of what is loaded\n self.oscs_loaded = False\n self.exponents_loaded = False",
"def __init__(self):\n super(HashCheck, self).__init__()\n self.hash_log_curr = {}\n self.hash_curr_files = {}\n self.log_cut_off_date = self.get_timestamp(self.number_of_log_days)\n #holds hash codes for which duplicates are excitable\n self.valid = None",
"def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket",
"def init_buckets(len2freq):\n source = Counter(len2freq)\n\n if not len(source):\n raise ValueError('Empty length-to-frequency map')\n\n if not all(map(lambda x: isinstance(x, int), source.keys())):\n raise ValueError('Keys of length-to-frequency must be integers')\n\n if not all(map(lambda x: isinstance(x, int), source.values())):\n raise ValueError('Values of length-to-frequency must be integers')\n\n denominator = 8\n lengths = sorted(source.keys())\n\n buckets = []\n for lng in lengths:\n b = int(np.ceil(lng / denominator)) * denominator + 1\n if not len(buckets) or buckets[-1][0] != b:\n buckets.append((b, {}))\n buckets[-1][1][lng] = source[lng]\n\n return buckets",
"def __init__(self):\n self.bucket_of_keys = {}\n self.buckets = LinkedList()",
"def __init__(self):\n _snap.TStrHashF_DJB_swiginit(self, _snap.new_TStrHashF_DJB())",
"def __init__(self):\n self.hash = [[] for _ in range(20011)]",
"def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0",
"def __init__(self):\n self.counts = {}",
"def __init__(self, frequency):\n super().__init__()\n self.__frequency = frequency\n self.__iterations = 0\n self._is_set_up = False",
"def __init__(self):\n self.size = 10000\n self.hashmap = [None] * self.size",
"def __init__(self):\n self.buckets = 1009\n self.table = [[] for _ in range(self.buckets)]",
"def __init__(self):\n \n self.hashset=[None]* 1000",
"def __init__(self):\n self.hash_table = {}\n self.count_table = {}\n self.head = CountListNode()\n self.tail = CountListNode()",
"def generate_brown_frequency_dictionary():\n brown_frequency_dictionary = FreqDist()\n for sentence in brown.sents():\n for word in sentence:\n brown_frequency_dictionary[word] += 1\n\n corpus_frequency_distribution = pd.DataFrame(list(brown_frequency_dictionary.items()), columns = [\"Word\",\"Frequency\"])\n corpus_frequency_distribution.sort_values(\"Frequency\")\n corpus_frequency_distribution.to_csv('corpus_frequency.csv')\n return brown_frequency_dictionary",
"def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()",
"def __init__(self):\n self.hash_map = HashMap(1000)\n pass"
]
| [
"0.6704742",
"0.65338004",
"0.64289284",
"0.63803357",
"0.62496257",
"0.6220571",
"0.6148755",
"0.6134633",
"0.60455704",
"0.59618205",
"0.59502053",
"0.59428495",
"0.5888558",
"0.58844864",
"0.5863423",
"0.5857028",
"0.5841682",
"0.5838687",
"0.5838318",
"0.5813352",
"0.57673967",
"0.5715598",
"0.5712637",
"0.57056296",
"0.56955504",
"0.5685467",
"0.5683514",
"0.56453496",
"0.5603033",
"0.55792224"
]
| 0.8135423 | 0 |
Compute Mean Volume Backscattering Strength (MVBS) based on intervals of range (``echo_range``) and ``ping_time`` specified in physical units. Output of this function differs from that of ``compute_MVBS_index_binning``, which computes bin-averaged Sv according to intervals of ``echo_range`` and ``ping_time`` specified as index numbers. | def compute_MVBS(ds_Sv, range_meter_bin=20, ping_time_bin="20S"):
# create bin information for echo_range
range_interval = np.arange(0, ds_Sv["echo_range"].max() + range_meter_bin, range_meter_bin)
# create bin information needed for ping_time
ping_interval = (
ds_Sv.ping_time.resample(ping_time=ping_time_bin, skipna=True).asfreq().ping_time.values
)
# calculate the MVBS along each channel
MVBS_values = get_MVBS_along_channels(ds_Sv, range_interval, ping_interval)
# create MVBS dataset
ds_MVBS = xr.Dataset(
data_vars={"Sv": (["channel", "ping_time", "echo_range"], MVBS_values)},
coords={
"ping_time": ping_interval,
"channel": ds_Sv.channel,
"echo_range": range_interval[:-1],
},
)
# TODO: look into why 'filenames' exist here as a variable
# Added this check to support the test in test_process.py::test_compute_MVBS
if "filenames" in ds_MVBS.variables:
ds_MVBS = ds_MVBS.drop_vars("filenames")
# ping_time_bin parsing and conversions
# Need to convert between pd.Timedelta and np.timedelta64 offsets/frequency strings
# https://xarray.pydata.org/en/stable/generated/xarray.Dataset.resample.html
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.resample.html
# https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html
# https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.resolution_string.html
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
# https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
timedelta_units = {
"d": {"nptd64": "D", "unitstr": "day"},
"h": {"nptd64": "h", "unitstr": "hour"},
"t": {"nptd64": "m", "unitstr": "minute"},
"min": {"nptd64": "m", "unitstr": "minute"},
"s": {"nptd64": "s", "unitstr": "second"},
"l": {"nptd64": "ms", "unitstr": "millisecond"},
"ms": {"nptd64": "ms", "unitstr": "millisecond"},
"u": {"nptd64": "us", "unitstr": "microsecond"},
"us": {"nptd64": "ms", "unitstr": "millisecond"},
"n": {"nptd64": "ns", "unitstr": "nanosecond"},
"ns": {"nptd64": "ms", "unitstr": "millisecond"},
}
ping_time_bin_td = pd.Timedelta(ping_time_bin)
# res = resolution (most granular time unit)
ping_time_bin_resunit = ping_time_bin_td.resolution_string.lower()
ping_time_bin_resvalue = int(
ping_time_bin_td / np.timedelta64(1, timedelta_units[ping_time_bin_resunit]["nptd64"])
)
ping_time_bin_resunit_label = timedelta_units[ping_time_bin_resunit]["unitstr"]
# Attach attributes
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["echo_range"].attrs = {"long_name": "Range distance", "units": "m"}
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_time_bin_resvalue} {ping_time_bin_resunit_label} " # noqa
"comment: ping_time is the interval start) "
f"echo_range: mean (interval: {range_meter_bin} meter "
"comment: echo_range is the interval start)"
),
"binning_mode": "physical units",
"range_meter_interval": str(range_meter_bin) + "m",
"ping_time_interval": ping_time_bin,
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "commongrid.compute_MVBS"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
ds_MVBS = insert_input_processing_level(ds_MVBS, input_ds=ds_Sv)
return ds_MVBS | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):\n da_sv = 10 ** (ds_Sv[\"Sv\"] / 10) # average should be done in linear domain\n da = 10 * np.log10(\n da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary=\"pad\").mean(\n skipna=True\n )\n )\n\n # Attach attributes and coarsened echo_range\n da.name = \"Sv\"\n ds_MVBS = da.to_dataset()\n ds_MVBS.coords[\"range_sample\"] = (\n \"range_sample\",\n np.arange(ds_MVBS[\"range_sample\"].size),\n {\"long_name\": \"Along-range sample number, base 0\"},\n ) # reset range_sample to start from 0\n ds_MVBS[\"echo_range\"] = (\n ds_Sv[\"echo_range\"]\n .coarsen( # binned echo_range (use first value in each average bin)\n ping_time=ping_num, range_sample=range_sample_num, boundary=\"pad\"\n )\n .min(skipna=True)\n )\n _set_MVBS_attrs(ds_MVBS)\n ds_MVBS[\"Sv\"] = ds_MVBS[\"Sv\"].assign_attrs(\n {\n \"cell_methods\": (\n f\"ping_time: mean (interval: {ping_num} pings \"\n \"comment: ping_time is the interval start) \"\n f\"range_sample: mean (interval: {range_sample_num} samples along range \"\n \"comment: range_sample is the interval start)\"\n ),\n \"comment\": \"MVBS binned on the basis of range_sample and ping number specified as index numbers\", # noqa\n \"binning_mode\": \"sample number\",\n \"range_sample_interval\": f\"{range_sample_num} samples along range\",\n \"ping_interval\": f\"{ping_num} pings\",\n \"actual_range\": [\n round(float(ds_MVBS[\"Sv\"].min().values), 2),\n round(float(ds_MVBS[\"Sv\"].max().values), 2),\n ],\n }\n )\n\n prov_dict = echopype_prov_attrs(process_type=\"processing\")\n prov_dict[\"processing_function\"] = \"commongrid.compute_MVBS_index_binning\"\n ds_MVBS = ds_MVBS.assign_attrs(prov_dict)\n ds_MVBS[\"frequency_nominal\"] = ds_Sv[\"frequency_nominal\"] # re-attach frequency_nominal\n\n ds_MVBS = insert_input_processing_level(ds_MVBS, input_ds=ds_Sv)\n\n return ds_MVBS",
"def bb_apply_nb(ts, window, ewm, alpha, adjust, ddof, ma_cache_dict, mstd_cache_dict):\n # Calculate lower, middle and upper bands\n h = hash((window, ewm))\n ma = np.copy(ma_cache_dict[h])\n mstd = np.copy(mstd_cache_dict[h])\n # # (MA + Kσ), MA, (MA - Kσ)\n return ma, ma + alpha * mstd, ma - alpha * mstd",
"def burst_time(temp, bstart, bstop):\n to_sum = []\n for b in range(len(bstart)):\n if bstop[b]-bstart[b] >= 0:\n to_sum.append(bstop[b]-bstart[b])\n elif bstop[b]-bstart[b] < 0 and b == len(bstop)+1: # Make it go to end\n to_sum.append(temp['length']/1000.-bstart[b])\n else:\n pass\n return np.mean(to_sum), np.std(to_sum)/np.mean(to_sum), sum(to_sum)/(temp['length']/1000.)",
"def bbands(price, length=30, numsd=2):\n ave = pd.stats.moments.rolling_mean(price,length)\n sd = pd.stats.moments.rolling_std(price,length)\n upband = ave + (sd*numsd)\n dnband = ave - (sd*numsd)\n return np.round(upband,3), np.round(dnband,3)",
"def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average",
"def ma_nb(a, window, ewm, adjust=False):\n if ewm:\n return generic_nb.ewm_mean_nb(a, window, minp=window, adjust=adjust)\n return generic_nb.rolling_mean_nb(a, window, minp=window)",
"def calc_spindle_buffer_means(self):\n \n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_buffer_aggregates = {}\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles_wbuffer[chan]:\n # set the base df\n agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])\n rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))\n # join on the index for each spindle\n for x in range(1, len(self.spindles_wbuffer[chan])):\n mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])\n spindle_buffer_aggregates[chan] = mean_df\n \n print('Calculating statistics...')\n # create a new multiindex dataframe for calculations\n calcs = ['mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n spindle_buffer_means = pd.DataFrame(columns=columns)\n \n # fill the dataframe\n for chan in spindle_buffer_aggregates.keys():\n spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)\n spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)\n spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)\n \n self.spindle_buffer_aggregates = spindle_buffer_aggregates\n self.spindle_buffer_means = spindle_buffer_means\n print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statisics stored in obj.spindle_buffer_means dataframe.')",
"def get_power_native_binning(self, mean_fluxes):\n mf = None\n flux_arr = np.array([])\n for (i,ss) in enumerate(self.spectrae):\n if mean_fluxes is not None:\n mf = mean_fluxes[i]\n kf_sim, flux_power_sim = ss.get_flux_power_1D(\"H\",1,1215, mean_flux_desired=mf)\n #Store k_F in comoving Mpc/h units, so that it is independent of redshift.\n vscale = ss.velfac * 3.085678e24/ss.units.UnitLength_in_cm\n kf_sim *= vscale\n ii = np.where(kf_sim <= self.maxk)\n flux_arr = np.append(flux_arr,flux_power_sim[ii])\n if self.kf is None:\n self.kf = kf_sim[ii]\n else:\n assert np.all(np.abs(kf_sim[ii]/self.kf - 1) < 1e-5)\n flux_arr = np.ravel(flux_arr)\n assert np.shape(flux_arr) == (self.len()*np.size(self.kf),)\n self.drop_table()\n return flux_arr",
"def find_mvpa_bouts(self, min_dur=10, breaktime=2):\n\n print(\"\\nFinding MVPA activity bouts with minimum duration of {} minutes with a \"\n \"{}-minute break allowed...\".format(min_dur, breaktime))\n\n # Finds longest MVPA bout (no breaks)\n longest = 0\n current = 0\n for num in [i for i in self.df_epoch[\"Wrist_Intensity\"]]:\n if num >= 2:\n current += 1\n else:\n longest = max(longest, current)\n current = 0\n\n print(\"-No {}-minute bouts founds.\".format(min_dur))\n print(\"-Longest MVPA bout was {} minutes.\".format(longest * 15 / 60))",
"def vol_run_bar(data,ET_window,bt1_window,pos_vol_window,neg_vol_window,warm_up_len=100):\n data = _preprocess(data, need_vol=True)\n b_t = _direction(data[\"price\"])\n vol = data[\"vol\"]\n N = data.shape[0]\n\n # initialize E_T, P(b_t=1), E[v_t|b_t==1], E[v_t|b_t==-1]\n t0 = warm_up_len\n E_T = t0\n\n P_bt1 = np.count_nonzero(b_t[:t0]==1)/t0\n P_bt1_vec = [P_bt1]\n\n pos_loc = np.where(b_t[:t0]==1)[0]\n if len(pos_loc) == 0:\n pos_vol_vec = [0]\n else:\n pos_vol_vec = [np.mean(vol[pos_loc].values)]\n\n neg_loc = np.where(b_t[:t0]==1)[0]\n if len(neg_loc) == 0:\n neg_vol_vec = [0]\n else:\n neg_vol_vec = [np.mean(vol[neg_loc].values)]\n E_theta = E_T * max(pos_vol_vec[-1]*P_bt1, neg_vol_vec[-1]*(1-P_bt1))\n \n bar = []\n bar_len = 0\n pos_vol,neg_vol = 0, 0\n pos_cnt,neg_cnt = 0, 0\n increment = 0\n # start updating\n for i in range(N):\n if b_t[i] == 1:\n pos_vol += vol[i]\n pos_cnt += 1\n elif b_t[i] == -1:\n neg_vol += vol[i]\n neg_cnt += 1\n increment += 1\n\n if max(pos_vol,neg_vol) >= E_theta: # max(pos_cnt,neg_cnt) is theta_t\n bar.append(increment) # in this scenario we sample a bar\n bar_len += 1\n\n P_bt1_vec.append(pos_cnt/increment)\n pos_vol_vec.append(pos_vol/increment)\n neg_vol_vec.append(neg_vol/increment)\n\n pos_cnt, neg_cnt = 0, 0 # reset \\sum_{b_t==1} b_t to 0\n pos_vol, neg_vol = 0, 0 # reset \\sum_{b_t==1} v_t to 0\n increment = 0\n # recalculate E_theta\n E_T = _EMA(bar, ET_window)[-1]\n P_bt1 = _EMA(P_bt1_vec, bt1_window)[-1]\n E_v_bt_pos = _EMA(pos_vol_vec,pos_vol_window)[-1]\n E_v_bt_neg = _EMA(neg_vol_vec,neg_vol_window)[-1]\n E_theta = E_T * max(E_v_bt_pos*P_bt1, E_v_bt_neg * (1 - P_bt1))\n bar.append(data.shape[0] - sum(bar))\n result = _bar2df(bar,data)\n return result",
"def ewm(dataArray):\r\n\r\n # normalized = np.zeros(dataArray.shape)\r\n starting_means = np.mean(dataArray[:init_block_size])\r\n starting_var = np.var(dataArray[:init_block_size])\r\n averages = np.copy(starting_means)\r\n variances = np.copy(starting_var)\r\n\r\n for i in range(0, len(dataArray)):\r\n # for the first samples, there are not enough previous samples to warrant an exponential weighted averaging\r\n # simply substract the true average of the first samples\r\n if i < init_block_size:\r\n dataArray[i] = (dataArray[i] - starting_means) / np.maximum(eps, np.sqrt(starting_var))\r\n else:\r\n #update the rolling mean and variance\r\n averages = 0.999 * averages + 0.001 * dataArray[i]\r\n variances = 0.999 * variances + 0.001 * (np.square(dataArray[i] - averages))\r\n\r\n dataArray[i] = (dataArray[i] - averages) / np.maximum(eps, np.sqrt(variances)) \r\n\r\n return dataArray",
"def pwcmtm(t, w, t_binning):\n\n\n # computing the optimal solution of the least squares approximation \\hat\\beta\n hat_beta= np.array([np.mean(w[t_binning == i]) for i in range(max(t_binning) + 1)])\n \n # reconstructing the (S . \\hat\\beta) vector (the vector approximating w)\n hat_w= hat_beta[t_binning]\n \n # computing the measure\n return np.sum((w - hat_w)**2)/(np.var(w)*len(w))",
"def sample_maxwell_boltzmann_velocity_distribution(v_thermal, num_velocities):\n a = v_thermal / np.sqrt(2) # shape parameter of distribution\n\n maxwell = stats.maxwell\n\n speeds = maxwell.rvs(loc=0, scale=a, size=num_velocities) # generate speeds\n theta = np.random.rand(num_velocities) * 2 * np.pi # select random angle\n\n x_vels = speeds * np.sin(theta)\n y_vels = speeds * np.cos(theta)\n\n return np.stack((x_vels, y_vels))",
"def binning(S, bands):\n B = np.zeros((S.shape[0], len(bands)), dtype=S.dtype)\n for i, b in enumerate(bands):\n B[:, i] = np.mean(S[:, b[0] : b[1]], axis=1)\n\n return B",
"def bin_output(emg, bin_size, overlap = 0):\n win_d = bin_size - overlap\n n_bins = int(((emg.shape[0]-bin_size)/win_d)+1)\n EMGbin = np.empty([n_bins, emg.shape[1]])\n for i in range(n_bins):\n EMGbin[i,:] = np.mean(emg[i*win_d:i*win_d+bin_size,:], axis = 0)\n \n return EMGbin",
"def bsm_vega(S0, K, T, r, sigma):\n \n from math import log, sqrt\n from scipy import stats\n \n S0 = float(S0)\n d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T / (sigma * sqrt(T))\n vega = S0 * stats.normcdf(d1, 0.0, 1.0) * sqrt(T)\n return vega\n \n# Implied volatility function\n\ndef bsm_call_imp_vol(S0, K, T, r, C0, sigma_est, it = 100):\n \"\"\" \n Implied volatility of European call option in BSM model\n \n Parameters\n ==========\n S0 : Float\n Initial stock/index level\n K : Float\n Strike Price\n T : Float\n Maturity Date (in year fractions)\n r : Float\n Constant risk-free short rate\n sigma_est : Float\n Estimate of impl. volatility\n it : integer\n Number of iterations\n \n Returns\n =======\n sigma_est : Float\n Numerically estimated implied volatility\n \"\"\"\n for i in range(it):\n sigma_est -= ((bsm_call_value(S0, K, T, r, sigma_est) - C0)\n / bsm_vega(S0, K, T, r, sigma_est))\n return sigma_est",
"def vwap(high, low, close, volumes, interval):\n\n # calculate prices data for each day\n prices = (high + low + close) / 3\n\n # declare empty VWAP numpy array\n p = np.zeros((prices.shape[0] - interval))\n\n # calculate the value of each point in the VWAP array\n for t in range(0, p.shape[0]):\n p[t] = np.sum(prices[t:t + interval]*volumes[t:t + interval]) / np.sum(volumes[t:t + interval])\n\n return p",
"def vwap(high, low, close, volumes, interval):\n\n # calculate prices data for each day\n prices = (high + low + close) / 3\n\n # declare empty VWAP numpy array\n p = np.zeros((prices.shape[0] - interval))\n\n # calculate the value of each point in the VWAP array\n for t in range(0, p.shape[0]):\n p[t] = np.sum(prices[t:t + interval]*volumes[t:t + interval]) / np.sum(volumes[t:t + interval])\n\n return p",
"def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n diff = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def apply_transmission(self, slamb, sflux):\n mean, samples = self._get_mean_and_samples_attribute('apply_transmission')\n mean_val = mean(slamb, sflux)\n samp_val = [sk(slamb, sflux) for sk in samples]\n return mean_val, samp_val",
"def mass_per_bin(self, time_edges, sample_rate=25):\n\n return mass_per_bin(self._sfh_calculator, time_edges, sample_rate=sample_rate)",
"def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n bfspec = self.dedisperse(dmbin)[tbin].mean(axis=0).real\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()",
"def draw_bs_reps_mean(data, size=1):\n out = np.empty(size)\n for i in range(size):\n out[i] = np.mean(draw_bs_sample(data))\n return out",
"def blockMean(self, region_width, scale=0.95, down=True,thresholdFromLocalBlocks=True):\n region_width = boof_fixed_length(region_width)\n\n java_object = pbg.gateway.jvm.boofcv.factory.filter.binary.FactoryThresholdBinary. \\\n blockMean(region_width,float(scale),down,thresholdFromLocalBlocks,self.boof_image_type)\n return InputToBinary(java_object)",
"def specmod(self, tbin, bgwindow=4):\n\n diff = self.tracksub(tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def compute_bias(ics, vbc):\n import os, time\n from seren3.array import SimArray\n \n # Compute size of grid and boxsize (for this patch)\n N = vbc.shape[0]\n boxsize = ics.boxsize.in_units(\"Mpc a h**-1\") * (float(N) / float(ics.header.N))\n\n # Compute vbc @ z=1000\n z = ics.z\n rms = vbc_rms(vbc)\n rms_recom = rms * (1001./z)\n\n # Check for PS and run CICsASS if needed\n fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n if not os.path.isfile(fname_vbc0):\n exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n if not os.path.isfile(fname_vbcrecom):\n exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n # Load power spectra and compute bias\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n # Should have same lenghts if finished writing\n count = 0\n while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n count += 1\n if count > 10:\n raise Exception(\"Reached sleep limit. Filesizes still differ\")\n time.sleep(5)\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n #CDM bias\n b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n # Baryon bias\n b_b = ps_vbcrecom[2] / ps_vbc0[2]\n # Wavenumber\n k_bias = SimArray(ps_vbcrecom[0] / ics.cosmo[\"h\"], \"h Mpc**-1\")\n\n return k_bias, b_cdm, b_b",
"def calc_spindle_means(self):\n\n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_aggregates = {}\n datatypes = ['Raw', 'spfilt']\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles[chan]:\n spindle_aggregates[chan] = {}\n for datatype in datatypes:\n # set the base df\n agg_df = pd.DataFrame(self.spindles[chan][0][datatype])\n agg_df = agg_df.rename(columns={datatype:'spin_0'})\n rsuffix = list(range(1, len(self.spindles[chan])))\n # join on the index for each spindle\n agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')\n spindle_aggregates[chan][datatype] = agg_df\n \n print('Calculating spindle statistics...')\n # create a new multiindex dataframe for calculations\n spindle_means = {}\n calcs = ['count', 'mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n for datatype in datatypes:\n spindle_means[datatype] = pd.DataFrame(columns=columns)\n # fill the dataframe\n for chan in spindle_aggregates.keys():\n spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)\n spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)\n spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)\n spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)\n \n self.spindle_aggregates = spindle_aggregates\n self.spindle_means = spindle_means\n print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statisics stored in obj.spindle_means dataframe.\\n')",
"def mu_law_bins(num_bins):\n #all edges\n bins_edge = np.linspace(-1, 1, num_bins + 1)\n #center of all edges\n bins_center = np.linspace(-1 + 1.0 / num_bins, 1 - 1.0 / num_bins, num_bins)\n #get the right edges\n bins_trunc = bins_edge[1:]\n #if sample >= right edges, it might be assigned to the next bin, add 0.1 to avoid this\n bins_trunc[-1] += 0.1\n #convert edges and centers to mu-law scale\n bins_edge_mu = np.multiply(np.sign(bins_trunc), (num_bins ** np.absolute(bins_trunc) - 1) / (num_bins - 1))\n bins_center_mu = np.multiply(np.sign(bins_center), (num_bins ** np.absolute(bins_center) - 1) / (num_bins - 1))\n \n return (bins_edge_mu, bins_center_mu)",
"def BoosterFlux(E,mbparam):\n flux_data = np.array(LoadBoosterFlux(mbparam))\n E_lo = flux_data[:,0]*mbparam.GeV\n E_hi = flux_data[:,1]*mbparam.GeV\n \n nu_mu = flux_data[:,2]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_mub = flux_data[:,3]/(50.0*mbparam.MeV) # conv. scale to eV\n \n nu_e = flux_data[:,4]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_eb = flux_data[:,5]/(50.0*mbparam.MeV) # conv. scale to eV\n \n for i,EE in enumerate(E_lo):\n if E >= E_lo[i] and E < E_hi[i]:\n return [nu_e[i],nu_eb[i],nu_mu[i],nu_mub[i]]\n else :\n pass\n \n return [0.0,0.0,0.0,0.0]"
]
| [
"0.7186321",
"0.50017744",
"0.48987573",
"0.48254302",
"0.47455582",
"0.47400346",
"0.4675963",
"0.45459038",
"0.45336497",
"0.45286122",
"0.45276594",
"0.45231518",
"0.44899863",
"0.44832665",
"0.44780084",
"0.44775733",
"0.44637537",
"0.44637537",
"0.44610992",
"0.4456314",
"0.44534382",
"0.44265452",
"0.4400225",
"0.44002134",
"0.4371822",
"0.4370521",
"0.43679556",
"0.43647504",
"0.4351145",
"0.43504992"
]
| 0.6702398 | 1 |
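The ping_time_bin parsing in the compute_MVBS record above turns a pandas offset string into a count plus a spelled-out unit for the cell_methods attribute. A minimal sketch of that conversion, assuming only pandas and numpy; the "20S" value and the variable names are illustrative, not taken from a specific call:

import numpy as np
import pandas as pd

td = pd.Timedelta("20S")                      # same default bin as compute_MVBS
resunit = td.resolution_string.lower()        # most granular unit; "s" here (casing varies across pandas versions)
resvalue = int(td / np.timedelta64(1, "s"))   # 20
label = {"s": "second"}[resunit]              # lookup analogous to the timedelta_units table
print(f"interval: {resvalue} {label}")        # interval: 20 second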
Compute Mean Volume Backscattering Strength (MVBS) based on intervals of ``range_sample`` and ping number (``ping_num``) specified in index number. Output of this function differs from that of ``compute_MVBS``, which computes bin-averaged Sv according to intervals of range (``echo_range``) and ``ping_time`` specified in physical units. | def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):
da_sv = 10 ** (ds_Sv["Sv"] / 10) # average should be done in linear domain
da = 10 * np.log10(
da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary="pad").mean(
skipna=True
)
)
# Attach attributes and coarsened echo_range
da.name = "Sv"
ds_MVBS = da.to_dataset()
ds_MVBS.coords["range_sample"] = (
"range_sample",
np.arange(ds_MVBS["range_sample"].size),
{"long_name": "Along-range sample number, base 0"},
) # reset range_sample to start from 0
ds_MVBS["echo_range"] = (
ds_Sv["echo_range"]
.coarsen( # binned echo_range (use first value in each average bin)
ping_time=ping_num, range_sample=range_sample_num, boundary="pad"
)
.min(skipna=True)
)
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_num} pings "
"comment: ping_time is the interval start) "
f"range_sample: mean (interval: {range_sample_num} samples along range "
"comment: range_sample is the interval start)"
),
"comment": "MVBS binned on the basis of range_sample and ping number specified as index numbers", # noqa
"binning_mode": "sample number",
"range_sample_interval": f"{range_sample_num} samples along range",
"ping_interval": f"{ping_num} pings",
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "commongrid.compute_MVBS_index_binning"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
ds_MVBS = insert_input_processing_level(ds_MVBS, input_ds=ds_Sv)
return ds_MVBS | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_MVBS(ds_Sv, range_meter_bin=20, ping_time_bin=\"20S\"):\n\n # create bin information for echo_range\n range_interval = np.arange(0, ds_Sv[\"echo_range\"].max() + range_meter_bin, range_meter_bin)\n\n # create bin information needed for ping_time\n ping_interval = (\n ds_Sv.ping_time.resample(ping_time=ping_time_bin, skipna=True).asfreq().ping_time.values\n )\n\n # calculate the MVBS along each channel\n MVBS_values = get_MVBS_along_channels(ds_Sv, range_interval, ping_interval)\n\n # create MVBS dataset\n ds_MVBS = xr.Dataset(\n data_vars={\"Sv\": ([\"channel\", \"ping_time\", \"echo_range\"], MVBS_values)},\n coords={\n \"ping_time\": ping_interval,\n \"channel\": ds_Sv.channel,\n \"echo_range\": range_interval[:-1],\n },\n )\n\n # TODO: look into why 'filenames' exist here as a variable\n # Added this check to support the test in test_process.py::test_compute_MVBS\n if \"filenames\" in ds_MVBS.variables:\n ds_MVBS = ds_MVBS.drop_vars(\"filenames\")\n\n # ping_time_bin parsing and conversions\n # Need to convert between pd.Timedelta and np.timedelta64 offsets/frequency strings\n # https://xarray.pydata.org/en/stable/generated/xarray.Dataset.resample.html\n # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.resample.html\n # https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html\n # https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.resolution_string.html\n # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects\n # https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units\n timedelta_units = {\n \"d\": {\"nptd64\": \"D\", \"unitstr\": \"day\"},\n \"h\": {\"nptd64\": \"h\", \"unitstr\": \"hour\"},\n \"t\": {\"nptd64\": \"m\", \"unitstr\": \"minute\"},\n \"min\": {\"nptd64\": \"m\", \"unitstr\": \"minute\"},\n \"s\": {\"nptd64\": \"s\", \"unitstr\": \"second\"},\n \"l\": {\"nptd64\": \"ms\", \"unitstr\": \"millisecond\"},\n \"ms\": {\"nptd64\": \"ms\", \"unitstr\": \"millisecond\"},\n \"u\": {\"nptd64\": \"us\", \"unitstr\": \"microsecond\"},\n \"us\": {\"nptd64\": \"ms\", \"unitstr\": \"millisecond\"},\n \"n\": {\"nptd64\": \"ns\", \"unitstr\": \"nanosecond\"},\n \"ns\": {\"nptd64\": \"ms\", \"unitstr\": \"millisecond\"},\n }\n ping_time_bin_td = pd.Timedelta(ping_time_bin)\n # res = resolution (most granular time unit)\n ping_time_bin_resunit = ping_time_bin_td.resolution_string.lower()\n ping_time_bin_resvalue = int(\n ping_time_bin_td / np.timedelta64(1, timedelta_units[ping_time_bin_resunit][\"nptd64\"])\n )\n ping_time_bin_resunit_label = timedelta_units[ping_time_bin_resunit][\"unitstr\"]\n\n # Attach attributes\n _set_MVBS_attrs(ds_MVBS)\n ds_MVBS[\"echo_range\"].attrs = {\"long_name\": \"Range distance\", \"units\": \"m\"}\n ds_MVBS[\"Sv\"] = ds_MVBS[\"Sv\"].assign_attrs(\n {\n \"cell_methods\": (\n f\"ping_time: mean (interval: {ping_time_bin_resvalue} {ping_time_bin_resunit_label} \" # noqa\n \"comment: ping_time is the interval start) \"\n f\"echo_range: mean (interval: {range_meter_bin} meter \"\n \"comment: echo_range is the interval start)\"\n ),\n \"binning_mode\": \"physical units\",\n \"range_meter_interval\": str(range_meter_bin) + \"m\",\n \"ping_time_interval\": ping_time_bin,\n \"actual_range\": [\n round(float(ds_MVBS[\"Sv\"].min().values), 2),\n round(float(ds_MVBS[\"Sv\"].max().values), 2),\n ],\n }\n )\n\n prov_dict = echopype_prov_attrs(process_type=\"processing\")\n prov_dict[\"processing_function\"] = \"commongrid.compute_MVBS\"\n ds_MVBS = 
ds_MVBS.assign_attrs(prov_dict)\n ds_MVBS[\"frequency_nominal\"] = ds_Sv[\"frequency_nominal\"] # re-attach frequency_nominal\n\n ds_MVBS = insert_input_processing_level(ds_MVBS, input_ds=ds_Sv)\n\n return ds_MVBS",
"def bb_apply_nb(ts, window, ewm, alpha, adjust, ddof, ma_cache_dict, mstd_cache_dict):\n # Calculate lower, middle and upper bands\n h = hash((window, ewm))\n ma = np.copy(ma_cache_dict[h])\n mstd = np.copy(mstd_cache_dict[h])\n # # (MA + Kσ), MA, (MA - Kσ)\n return ma, ma + alpha * mstd, ma - alpha * mstd",
"def calc_spindle_buffer_means(self):\n \n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_buffer_aggregates = {}\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles_wbuffer[chan]:\n # set the base df\n agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])\n rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))\n # join on the index for each spindle\n for x in range(1, len(self.spindles_wbuffer[chan])):\n mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])\n spindle_buffer_aggregates[chan] = mean_df\n \n print('Calculating statistics...')\n # create a new multiindex dataframe for calculations\n calcs = ['mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n spindle_buffer_means = pd.DataFrame(columns=columns)\n \n # fill the dataframe\n for chan in spindle_buffer_aggregates.keys():\n spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)\n spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)\n spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)\n \n self.spindle_buffer_aggregates = spindle_buffer_aggregates\n self.spindle_buffer_means = spindle_buffer_means\n print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statisics stored in obj.spindle_buffer_means dataframe.')",
"def calc_spindle_means(self):\n\n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_aggregates = {}\n datatypes = ['Raw', 'spfilt']\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles[chan]:\n spindle_aggregates[chan] = {}\n for datatype in datatypes:\n # set the base df\n agg_df = pd.DataFrame(self.spindles[chan][0][datatype])\n agg_df = agg_df.rename(columns={datatype:'spin_0'})\n rsuffix = list(range(1, len(self.spindles[chan])))\n # join on the index for each spindle\n agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')\n spindle_aggregates[chan][datatype] = agg_df\n \n print('Calculating spindle statistics...')\n # create a new multiindex dataframe for calculations\n spindle_means = {}\n calcs = ['count', 'mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n for datatype in datatypes:\n spindle_means[datatype] = pd.DataFrame(columns=columns)\n # fill the dataframe\n for chan in spindle_aggregates.keys():\n spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)\n spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)\n spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)\n spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)\n \n self.spindle_aggregates = spindle_aggregates\n self.spindle_means = spindle_means\n print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statisics stored in obj.spindle_means dataframe.\\n')",
"def bbands(price, length=30, numsd=2):\n ave = pd.stats.moments.rolling_mean(price,length)\n sd = pd.stats.moments.rolling_std(price,length)\n upband = ave + (sd*numsd)\n dnband = ave - (sd*numsd)\n return np.round(upband,3), np.round(dnband,3)",
"def mass_per_bin(self, time_edges, sample_rate=25):\n\n return mass_per_bin(self._sfh_calculator, time_edges, sample_rate=sample_rate)",
"def sample_maxwell_boltzmann_velocity_distribution(v_thermal, num_velocities):\n a = v_thermal / np.sqrt(2) # shape parameter of distribution\n\n maxwell = stats.maxwell\n\n speeds = maxwell.rvs(loc=0, scale=a, size=num_velocities) # generate speeds\n theta = np.random.rand(num_velocities) * 2 * np.pi # select random angle\n\n x_vels = speeds * np.sin(theta)\n y_vels = speeds * np.cos(theta)\n\n return np.stack((x_vels, y_vels))",
"def mu_law_bins(num_bins):\n #all edges\n bins_edge = np.linspace(-1, 1, num_bins + 1)\n #center of all edges\n bins_center = np.linspace(-1 + 1.0 / num_bins, 1 - 1.0 / num_bins, num_bins)\n #get the right edges\n bins_trunc = bins_edge[1:]\n #if sample >= right edges, it might be assigned to the next bin, add 0.1 to avoid this\n bins_trunc[-1] += 0.1\n #convert edges and centers to mu-law scale\n bins_edge_mu = np.multiply(np.sign(bins_trunc), (num_bins ** np.absolute(bins_trunc) - 1) / (num_bins - 1))\n bins_center_mu = np.multiply(np.sign(bins_center), (num_bins ** np.absolute(bins_center) - 1) / (num_bins - 1))\n \n return (bins_edge_mu, bins_center_mu)",
"def rms_smoothing( values, samples=100 ):\n rms = []\n rng = int(samples/2) # Sample used for Smoothing\n for i,x in enumerate( values ): \n lo = i-rng if i-rng > 0 else 0\n hi = i+rng\n rms.append( rootMeanSquareValueOf( values[ lo : hi] ))\n return rms",
"def burst_time(temp, bstart, bstop):\n to_sum = []\n for b in range(len(bstart)):\n if bstop[b]-bstart[b] >= 0:\n to_sum.append(bstop[b]-bstart[b])\n elif bstop[b]-bstart[b] < 0 and b == len(bstop)+1: # Make it go to end\n to_sum.append(temp['length']/1000.-bstart[b])\n else:\n pass\n return np.mean(to_sum), np.std(to_sum)/np.mean(to_sum), sum(to_sum)/(temp['length']/1000.)",
"def ma_nb(a, window, ewm, adjust=False):\n if ewm:\n return generic_nb.ewm_mean_nb(a, window, minp=window, adjust=adjust)\n return generic_nb.rolling_mean_nb(a, window, minp=window)",
"def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n bfspec = self.dedisperse(dmbin)[tbin].mean(axis=0).real\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def specmod(self, dmbin, tbin, bgwindow=4):\n\n# smarr = n.zeros(len(self.dataph)) # uncomment to do specmod lightcurve\n# for int in range(len(self.dataph)-bgwindow):\n diff = self.tracksub(dmbin, tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def paramSamples(self):\n\n if self._paramSamples is not None:\n return self._paramSamples\n timescale = self.mjdmax - self.mjdmin\n T0Vals = self.randomState.uniform(size=self.numSN) * timescale \\\n + self.mjdmin\n mB, x1, c, m = SALT2_MMDist(self.numSN)\n print(\"range of sampled mB\", mB.min(), mB.max())\n x0 = np.zeros(len(mB))\n mB += self.randomState.normal(loc=0., scale=self.Mdisp,\n size=self.numSN)\n H70cosmo = self.cosmo.clone(name='H70cosmo',\n H0=self.cosmo.H0 * (70/self.cosmo.H0.value))\n MB = mB + H70cosmo.distmod(self.zSamples).value - \\\n self.cosmo.distmod(self.zSamples).value\n model = sncosmo.Model(source='SALT2')\n for i, z in enumerate(self.zSamples):\n model.set(z=z, x1=x1[i], c=c[i])\n model.set_source_peakabsmag(MB[i], 'bessellB', 'ab',\n cosmo=self.cosmo)\n x0[i] = model.get('x0')\n mB[i] = model.source.peakmag('bessellB', 'ab')\n df = pd.DataFrame(dict(x0=x0, mB=mB, x1=x1, c=c,\n t0=T0Vals, z=self.zSamples, snid=self.snids))\n self._paramSamples = df\n return self._paramSamples",
"def apply_transmission(self, slamb, sflux):\n mean, samples = self._get_mean_and_samples_attribute('apply_transmission')\n mean_val = mean(slamb, sflux)\n samp_val = [sk(slamb, sflux) for sk in samples]\n return mean_val, samp_val",
"def blockMean(self, region_width, scale=0.95, down=True,thresholdFromLocalBlocks=True):\n region_width = boof_fixed_length(region_width)\n\n java_object = pbg.gateway.jvm.boofcv.factory.filter.binary.FactoryThresholdBinary. \\\n blockMean(region_width,float(scale),down,thresholdFromLocalBlocks,self.boof_image_type)\n return InputToBinary(java_object)",
"def binning(S, bands):\n B = np.zeros((S.shape[0], len(bands)), dtype=S.dtype)\n for i, b in enumerate(bands):\n B[:, i] = np.mean(S[:, b[0] : b[1]], axis=1)\n\n return B",
"def sm_measure_voltage(self,num_readings=1):\n self.sm.set_measurement_function(\"VOLTAGE\")\n self.sm.format_readings(\"VOLTAGE\")\n return average(self.sm.take_measurement(num_readings))",
"def ewm(dataArray):\r\n\r\n # normalized = np.zeros(dataArray.shape)\r\n starting_means = np.mean(dataArray[:init_block_size])\r\n starting_var = np.var(dataArray[:init_block_size])\r\n averages = np.copy(starting_means)\r\n variances = np.copy(starting_var)\r\n\r\n for i in range(0, len(dataArray)):\r\n # for the first samples, there are not enough previous samples to warrant an exponential weighted averaging\r\n # simply substract the true average of the first samples\r\n if i < init_block_size:\r\n dataArray[i] = (dataArray[i] - starting_means) / np.maximum(eps, np.sqrt(starting_var))\r\n else:\r\n #update the rolling mean and variance\r\n averages = 0.999 * averages + 0.001 * dataArray[i]\r\n variances = 0.999 * variances + 0.001 * (np.square(dataArray[i] - averages))\r\n\r\n dataArray[i] = (dataArray[i] - averages) / np.maximum(eps, np.sqrt(variances)) \r\n\r\n return dataArray",
"def draw_bs_reps_mean(data, size=1):\n out = np.empty(size)\n for i in range(size):\n out[i] = np.mean(draw_bs_sample(data))\n return out",
"def specmod(self, tbin, bgwindow=4):\n\n diff = self.tracksub(tbin, bgwindow=bgwindow)\n bfspec = diff.mean(axis=0).real # should be ok for multipol data...\n sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )\n\n return sm",
"def sample_vMF(mu, kappa, num_samples):\n dim = len(mu)\n result = np.zeros((num_samples, dim))\n for nn in range(num_samples):\n # sample offset from center (on sphere) with spread kappa\n w = _sample_weight(kappa, dim)\n\n # sample a point v on the unit sphere that's orthogonal to mu\n v = _sample_orthonormal_to(mu)\n\n # compute new point\n result[nn, :] = v * np.sqrt(1. - w**2) + w * mu\n\n return result",
"def calc_spin_tstats(self, spin_range):\n \n print('Calculating spindle time-domain statistics...')\n \n # create multi-index dataframe\n # lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI', 'Power', 'Power']\n # lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd', 'center_freq', 'total_pwr']\n lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI']\n lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd']\n columns = pd.MultiIndex.from_arrays([lvl1, lvl2])\n spindle_stats = pd.DataFrame(columns=columns)\n \n #exclude non-EEG channels\n exclude = ['EOG_L', 'EOG_R', 'EKG']\n\n # fill dataframe\n for chan in self.spindles:\n if chan not in exclude:\n # calculate spindle count\n count = len(self.spindles[chan])\n \n if count == 0:\n spindle_stats.loc[chan] = [count, None, None, None, None, None, None, None]\n \n else:\n # calculate spindle duration\n durations = np.array([(self.spindles[chan][spin].time.iloc[-1] - self.spindles[chan][spin].time.iloc[0]).total_seconds() for spin in self.spindles[chan]])\n duration_mean = durations.mean()\n duration_sd = durations.std()\n\n # calculate amplitude\n amplitudes_raw = np.concatenate([self.spindles[chan][x].Raw.values for x in self.spindles[chan]])\n amp_rms_raw = np.sqrt(np.array([x**2 for x in amplitudes_raw]).mean())\n amp_sd_raw = amplitudes_raw.std()\n amplitudes_spfilt = np.concatenate([self.spindles[chan][x].spfilt.values for x in self.spindles[chan]])\n amp_rms_spfilt = np.sqrt(np.array([x**2 for x in amplitudes_spfilt]).mean())\n amp_sd_spfilt = amplitudes_spfilt.std()\n\n # calculate density\n density = count/((self.data.index[-1] - self.data.index[0]).total_seconds()/60)\n\n # calculate inter-spindle-interval (ISI) --> NOT ACCURATE FOR 2HR BLOCKS\n isi_arr = np.array([(self.spindles[chan][x+1].time.iloc[0] - self.spindles[chan][x].time.iloc[-1]).total_seconds() for x in self.spindles[chan] if x < len(self.spindles[chan])-1])\n isi_mean = isi_arr.mean()\n isi_sd = isi_arr.std()\n\n # calculate center frequency & total spindle power\n # spindle_power = self.spindle_psd_norm[chan]['normed_pwr'][(self.spindle_psd[chan].index >= spin_range[0]) & (self.spindle_psd[chan].index <= spin_range[1])]\n # center_freq = spindle_power.idxmax()\n # total_pwr = spindle_power.sum()\n\n spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd]\n # spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd, center_freq, total_pwr]\n\n self.spindle_tstats = spindle_stats \n \n print('Spindle time stats stored in obj.spindle_tstats.\\n')",
"def ewm_mean_nb(a, span, minp=None, adjust=False):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = ewm_mean_1d_nb(a[:, col], span, minp=minp, adjust=adjust)\n return out",
"def boost(sample):\n assert isinstance(sample, AudioSegment)\n\n # get the raw audio\n yield 1\n track_raw = sample.get_array_of_samples()\n\n # as list\n yield 2\n track_raw = list(track_raw)\n\n # c-value\n yield 3\n est_mean = np.mean(track_raw)\n\n # a-value\n yield 4\n est_std = 3.0 * np.std(track_raw) / (math.sqrt(2))\n\n yield 5\n bass_factor = int(round((est_std - est_mean) * 0.005))\n\n yield 6\n filtered = sample.low_pass_filter(bass_factor)\n\n yield 7\n combined = (sample - attenuate_db).overlay(filtered + accentuate_db)\n yield combined",
"async def update_mas(self, pair: str):\n\n for window in config['ma_windows']:\n try:\n num = self.last_update_nums[pair]\n source = self.adjusted_close_values[pair]\n ma = self.source_close_value_mas[pair][window]\n source_len = len(source)\n\n for index in range(source_len - num, source_len):\n average = sum(source[index - window:index]) / window\n ma.append(average)\n\n truncate = len(ma) - self.min_tick_length\n if truncate > 60:\n del ma[:truncate]\n\n self.close_value_mas[pair][window] = ma\n\n except IndexError:\n self.log.error('Cannot update MA {} for {} with data length of {}!',\n window, pair, len(self.adjusted_close_values[pair]))\n\n for window in config['vdma_windows']:\n try:\n num = self.last_update_nums[pair]\n source = self.base_24hr_volumes[pair][1]\n ma = self.volume_deriv_mas[pair][window]\n source_len = len(source)\n\n for index in range(source_len - num, source_len):\n average = sum(source[index - window:index]) / window\n ma.append(average)\n\n truncate = len(ma) - self.min_tick_length\n if truncate > 60:\n del ma[:truncate]\n\n except IndexError:\n self.log.error('Cannot update VDMA {} for {} with data length of {}!',\n window, pair, len(self.base_24hr_volumes[pair][1]))\n\n self.log.debug('{} Updated moving averages.', pair, verbosity=1)",
"def bsm_vega(S0, K, T, r, sigma):\n \n from math import log, sqrt\n from scipy import stats\n \n S0 = float(S0)\n d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T / (sigma * sqrt(T))\n vega = S0 * stats.normcdf(d1, 0.0, 1.0) * sqrt(T)\n return vega\n \n# Implied volatility function\n\ndef bsm_call_imp_vol(S0, K, T, r, C0, sigma_est, it = 100):\n \"\"\" \n Implied volatility of European call option in BSM model\n \n Parameters\n ==========\n S0 : Float\n Initial stock/index level\n K : Float\n Strike Price\n T : Float\n Maturity Date (in year fractions)\n r : Float\n Constant risk-free short rate\n sigma_est : Float\n Estimate of impl. volatility\n it : integer\n Number of iterations\n \n Returns\n =======\n sigma_est : Float\n Numerically estimated implied volatility\n \"\"\"\n for i in range(it):\n sigma_est -= ((bsm_call_value(S0, K, T, r, sigma_est) - C0)\n / bsm_vega(S0, K, T, r, sigma_est))\n return sigma_est",
"def gibbs_vhv(self, v0_sample):\n # t1 = timeit.default_timer()\n h1_mean, h1_sample = self.sample_h_given_v(v0_sample)\n # t2 = timeit.default_timer()\n v1_mean, v1_sample = self.sample_v_given_h(h1_sample)\n # t3 = timeit.default_timer()\n # print('gibbsvhv time: sample_h_given_v ', t2 - t1, ' sec') # 0.000492095947266 sec\n # print('gibbsvhv time: sample_v_given_h ', t3 - t2, ' sec') # 0.000334024429321 sec\n return h1_mean, h1_sample, v1_mean, v1_sample",
"def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.counts[k]) / (self.beta_0 + self.counts[k])\n tmp2 = (self.means[k] - self.m_0)\n tmp = np.linalg.inv(self.W_0) + self.counts[k] * self.covars[k] + tmp * tmp2 @ tmp2.T\n self.w_k[k] = np.linalg.inv(tmp)\n self.nu_k[k] = self.nu_0 + self.counts[k]\n self.alpha_k[k] = self.alpha_0[k] + self.counts[k]",
"def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()"
]
| [
"0.67099965",
"0.4966635",
"0.49315238",
"0.47369355",
"0.4736683",
"0.47173724",
"0.4700994",
"0.46965313",
"0.4664597",
"0.4657153",
"0.46374193",
"0.4625975",
"0.46182775",
"0.45960948",
"0.45564148",
"0.4544722",
"0.45403704",
"0.45295733",
"0.4529087",
"0.4528951",
"0.45234993",
"0.45080867",
"0.4492049",
"0.4491241",
"0.44877014",
"0.4486376",
"0.4442638",
"0.44379106",
"0.44255498",
"0.44212592"
]
| 0.7733896 | 0 |
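As the first comment in compute_MVBS_index_binning notes, the averaging is done in the linear domain and only converted back to dB afterwards. A small self-contained illustration of why averaging dB values directly would bias the result; the two Sv samples are made up purely for the arithmetic:

import numpy as np

sv_db = np.array([-60.0, -40.0])                     # two Sv samples in dB
naive = sv_db.mean()                                 # -50.0 dB, biased low
linear = 10 * np.log10((10 ** (sv_db / 10)).mean())  # about -42.97 dB
print(naive, round(linear, 2))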
Log a critical SQL error and exit | def sql_error(err):
try:
logger.critical('MySQL error [%d]: %s', err.args[0], err.args[1])
except IndexError:
logger.critical('MySQL error: %s', err)
sys.exit(-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sql_error(err):\n try:\n print('MySQL error [%d]: %s' % (err.args[0], err.args[1]))\n except IndexError:\n print('MySQL error: %s' % err)\n sys.exit(-1)",
"def fatal(self, *args):\n self.mylog.critical(*args)\n sys.exit(1)",
"def _err(self, *args):\n logger.error(*args)\n exit(1)",
"def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])",
"def db_err(qry):\n if qry is None:\n QtGui.QMessageBox.critical(None, \"Database Error\", \"An unknown error occurred\")\n else:\n QtGui.QMessageBox.critical(None, \"Database Error\", qry.lastError().text())\n return",
"def handle_error(msg):\r\n query.error_message = msg\r\n query.status = ADMIN_USER_QUERY_STATUSES.FAILED.value\r\n db.session.commit()\r\n raise Exception(msg)",
"def fatal(self, *args, **kwargs):",
"def handle_error(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()",
"def abort(self, msg):\n\n print\n print \"*** ERROR in module [ \" + self.name + \" ]: DEBUG INFO: \" + str(self.parent.debugInfo)\n print\n print \"*** ERROR in module [ \" + self.name + \" ]: \" + msg\n\n if (\"logfile\" in dir(self)):\n print\n print \"Logfile for failed module: \" + self.logfile\n\n # write error to logfile\n try:\n getoutput( \"echo \\\"*** Error in module [ \" + self.name + \" ]: DEBUG INFO: \" + str(self.parent.debugInfo).replace(\"\\n\",\"\") + \"\\\" >> \" + self.logfile )\n getoutput( \"echo \\\"*** Error in module [ \" + self.name + \" ]: \" + str(msg).replace(\"\\n\",\"\") + \"\\\" >> \" + self.logfile )\n except:\n pass\n sys.exit(1)",
"def print_critical(msg):\n print('CRITICAL - %s' % (msg))\n sys.exit(2)",
"def sql_exceptwrapper(method, integrity, *args, **kwargs):\n try:\n result = method(*args, **kwargs)\n return result\n except sqlite3.IntegrityError if bool(integrity) else exceptions.DummyException:\n dummy.UselessStdout.write(\"ASDASDASD\") # DummyException never going to happen\n except sqlite3.Error as sqerror:\n print(sqerror)",
"def fail(self, msg, *args):\n self.log.error(msg, *args)\n sys.exit(1)",
"def _send_database_problem(self):\n template_filename = self._get_config_template('databaseerror')\n text = read_template(\n template_filename,\n title='%s - Datebase error' % SERVER_NAME,\n header='Database error')\n if not text:\n self._send_internal_server_error()\n return\n self._send_head(text, 500)\n if not self._header_only:\n self.wfile.write(text)",
"def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)",
"def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)",
"def error(self, *args):\n\n if self.is_on(_Log.ERROR):\n self._write(self._err, *args)",
"def _log_exception(self, exception, query, parameters):\n logging.error(\"Error on MySQL Server:\" + self.host)\n logging.error(\"Error query:\", query)\n logging.error(\"Error parameters:\", parameters)\n logging.error(\"Error Exception:\", str(exception))",
"def error_throw(self,stage):\n if self.is_table_info == False:\n print(\"please enter table info by table_info()\")\n sys.exit(0)\n if stage == 'rank':\n if self.import_method == 'none':\n self.error_output_import()\n elif stage == 'output':\n if self.import_method == 'none':\n self.error_output_import()\n else: \n if self.rank_method == 'none':\n self.error_output_rank()",
"def main_log_error() -> None:\n\n try:\n main()\n except Exception:\n error(\"Unhandled exception: {}\".format(traceback.format_exc()))\n raise",
"def __exit__(self, exception_type, exception_val, trace):\n if not exception_type:\n self.commit()\n else:\n self.rollback()\n self.close()",
"def logged_batch_throws_uae_test(self):\n cursor = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n cursor.consistency_level = 'ONE'\n assert_unavailable(cursor.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")",
"def __exit__(self, exc_type, exc_value, exc_trace):\n try:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n except Exception as error:\n print(f\"DBCM::__exit__::{error}\")",
"def finalize_error():\n print('')\n exit(-1)",
"def execute_error_query():\n db = psycopg2.connect(database=\"news\")\n c = db.cursor()\n c.execute(error_query)\n content = c.fetchall()\n db.close()\n print(error_query_question)\n for date, error in content:\n print('%s -- %.2f%% errors' % (date.strftime(\"%B %d, %Y\"), error))\n print('\\n')",
"def cmd_query_die(self, c, e):\n self.die(self.getRandomQuitMsg())",
"def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)",
"def exception_handler_quits(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))\n sys.exit(1)",
"def error(error_no):\n print('--] Encountered unrecoverable ERROR [%s] ... leaving' % error_no)\n write_termination_message(error_no)\n sys.exit(0)",
"def error_exit(text):\n logging.error(text)\n exit(1)",
"def _epilogue(self):\n LOG.info(\"We could clean up the database connection.\")"
]
| [
"0.7171329",
"0.6761154",
"0.6277771",
"0.62170476",
"0.6160511",
"0.6116838",
"0.6057346",
"0.60028756",
"0.5875448",
"0.5868887",
"0.5848594",
"0.5823213",
"0.5809815",
"0.57910967",
"0.57910967",
"0.576729",
"0.5765866",
"0.57383525",
"0.57295555",
"0.5719349",
"0.57147664",
"0.57028365",
"0.56994784",
"0.56905395",
"0.5671562",
"0.5651915",
"0.56489104",
"0.56427306",
"0.564227",
"0.56399024"
]
| 0.78105843 | 1 |
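The sql_error record above assumes a module-level logger and an imported sys. A hedged usage sketch follows; the PyMySQL driver and the connection parameters are assumptions rather than something taken from the record, and the IndexError fallback covers drivers whose exceptions do not carry (errno, message) in args:

import logging
import sys

import pymysql

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

def sql_error(err):
    # same behaviour as the record above: log critically, then exit
    try:
        logger.critical('MySQL error [%d]: %s', err.args[0], err.args[1])
    except IndexError:
        logger.critical('MySQL error: %s', err)
    sys.exit(-1)

try:
    conn = pymysql.connect(host="localhost", user="reporter", password="secret", database="warehouse")
except pymysql.Error as err:
    sql_error(err)  # e.g. logs "MySQL error [1045]: Access denied ..." and exits with -1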
r"""Test the rotation matrix generator. | def test_rot(self):
print("rot()")
obs = self.fixture
# rotation(0) = identity
for axis in [1, 2, 3]:
# theta = 0.0
rotation = obs.rot(0.0, axis)
# find || eye - rot1 ||
diff = np.linalg.norm(np.eye(3) - rotation)
self.assertAlmostEqual(diff, 0.0, delta=1e-12)
# theta = 2*pi
rotation = obs.rot(2.0 * np.pi, axis)
# find || eye - rot1 ||
diff = np.linalg.norm(np.eye(3) - rotation)
self.assertAlmostEqual(diff, 0.0, delta=1e-12)
# perform many randomized tests
num_tests = 100
num_products = 10
for _test_counter in range(num_tests):
thetas = []
axes = []
base = np.eye(3)
# we will multiply a series of rotations into "base"
rot_all = base
for _rot_counter in range(num_products):
theta = np.random.uniform(2 * np.pi) # in [0,2 pi]
axis = np.random.randint(3) + 1 # in {1,2,3}
axes.append(axis)
thetas.append(theta)
rotation = obs.rot(theta, axis)
# multiply rot1 into the cumulative rotation
rot_all = np.dot(rot_all, rotation)
# now, back all the rotations out
for _rot_counter in range(num_products):
theta = thetas.pop()
axis = axes.pop()
# apply the inverse rotation
rotation = obs.rot(-theta, axis)
rot_all = np.dot(rot_all, rotation)
# find || base - rot1 * rot2 ||
diff = np.linalg.norm(base - rot_all)
self.assertAlmostEqual(diff, 0.0, delta=1e-10 * num_products) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_asssert_rotation_matrix_behaves_like_check_matrix():\n random_state = np.random.RandomState(2345)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n original_value = R[2, 2]\n for error in [0, 1e-8, 1e-7, 1e-5, 1e-4, 1]:\n R[2, 2] = original_value + error\n try:\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)\n except AssertionError:\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.check_matrix, R)",
"def test_d_2():\n rs = 10\n d = 2\n np.random.seed(rs)\n num = 3\n theta = np.random.uniform(0, 2 * math.pi)\n rotation = np.identity(d)\n\n rotation[0, 0] = math.cos(theta)\n rotation[0, 1] = - math.sin(theta)\n rotation[1, 0] = math.sin(theta)\n rotation[1, 1] = math.cos(theta)\n\n np.random.seed(rs)\n rotation_function = mt_obj.calculate_rotation_matrix(d, num)\n assert(np.all(rotation == rotation_function))",
"def test_arbitrary_rotation(self):\n \n # This test is run a bunch of times on various intervals, ranging from 50% to 1/6\n\t\t# (16.667%).\n for i in range(2, 7):\n \n interval = 1 / i # The amount to increase each qubit's probability by, relative to the previous qubit\n step_string = \"{:.4f}\".format(100 / i) # The decimal representation of the interval, as a percent\n target_probabilities = [0] * (i + 1) # This will store the desired probabilities of each qubit\n for j in range(0, i + 1):\n target_probability = j * interval\n target_probabilities[j] = target_probability\n\n # Run the test\n self.run_test(self.arbitrary_rotation_function, f\"Rotation with steps of 1/{i} ({step_string}%)\", 2000, target_probabilities, 0.05)",
"def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)",
"def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])",
"def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)",
"def test_calc_rotation(self):\n t = AioBaseTurtle()\n t.speed(speed=2)\n orient, steps, delta = t._calc_rotation(120)\n self.assertEqual(steps, 21)\n self.assertAlmostEqual(delta, 120.0 / 21.0)\n self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))\n self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = angles[0:3]\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def make_sample_rot_matrix(self, angles):\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n return numpy_utils.rotation_matrix(phi, chi, omega)",
"def test_rotation(self):\n quat_rotated = rowan.rotate(input1, vector_inputs)\n\n matrices = rowan.to_matrix(input1)\n matrix_rotated = np.einsum(\"ijk,ki->ij\", matrices, vector_inputs.T)\n self.assertTrue(np.allclose(matrix_rotated, quat_rotated))",
"def test_rotate_right(self):\n # Testing 'down' rotation clockwise\n side = 'R'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['g', 'g']], dtype='<U1'),\n np.array([['y', 'o'], ['y', 'o']], dtype='<U1'),\n np.array([['o', 'w'], ['o', 'w']], dtype='<U1'),\n np.array([['w', 'r'], ['w', 'r']], dtype='<U1'),\n np.array([['b', 'b'], ['b', 'b']], dtype='<U1'),\n np.array([['y', 'r'], ['y', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)",
"def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()",
"def test_x_rot(self):\n\n # Create a Matrix representing 90 deg x rot.\n mat = Matrix44.from_rot_x(90)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure the quat matches a 90 degree x rotation.\n expected = Quat.from_axis_angle_deg(Vec3(1, 0, 0), 90)\n AssertQuatAlmostEqual(quat, expected, self)",
"def test_conversions_matrix_euler_xyz():\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n pr.assert_rotation_matrix(R)\n\n e_xyz = pr.euler_xyz_from_matrix(R)\n R2 = pr.matrix_from_euler_xyz(e_xyz)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)\n\n e_xyz2 = pr.euler_xyz_from_matrix(R2)\n pr.assert_euler_xyz_equal(e_xyz, e_xyz2)\n\n # Gimbal lock\n for _ in range(5):\n e_xyz = random_state.rand(3)\n e_xyz[1] = np.pi / 2.0\n R = pr.matrix_from_euler_xyz(e_xyz)\n e_xyz2 = pr.euler_xyz_from_matrix(R)\n pr.assert_euler_xyz_equal(e_xyz, e_xyz2)\n\n e_xyz[1] = -np.pi / 2.0\n R = pr.matrix_from_euler_xyz(e_xyz)\n e_xyz2 = pr.euler_xyz_from_matrix(R)\n pr.assert_euler_xyz_equal(e_xyz, e_xyz2)",
"def test_id_rot():\n assert_array_almost_equal(pr.R_id, pr.matrix_from_axis_angle(pr.a_id))\n assert_array_almost_equal(pr.R_id, pr.matrix_from_quaternion(pr.q_id))\n assert_array_almost_equal(pr.R_id, pr.matrix_from_euler_xyz(pr.e_xyz_id))\n assert_array_almost_equal(pr.R_id, pr.matrix_from_euler_zyx(pr.e_zyx_id))",
"def test_rotate_right_counter(self):\n # Testing 'right' rotation counter-clockwise\n side = 'Rr'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['g', 'g']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['o', 'o']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['b', 'b']], dtype='<U1'),\n np.array([['r', 'r'], ['r', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)",
"def test_conversions_matrix_euler_zyx():\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n pr.assert_rotation_matrix(R)\n\n e_zyx = pr.euler_zyx_from_matrix(R)\n R2 = pr.matrix_from_euler_zyx(e_zyx)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)\n\n e_zyx2 = pr.euler_zyx_from_matrix(R2)\n pr.assert_euler_zyx_equal(e_zyx, e_zyx2)\n\n # Gimbal lock\n for _ in range(5):\n e_zyx = random_state.rand(3)\n e_zyx[1] = np.pi / 2.0\n R = pr.matrix_from_euler_zyx(e_zyx)\n e_zyx2 = pr.euler_zyx_from_matrix(R)\n pr.assert_euler_zyx_equal(e_zyx, e_zyx2)\n\n e_zyx[1] = -np.pi / 2.0\n R = pr.matrix_from_euler_zyx(e_zyx)\n e_zyx2 = pr.euler_zyx_from_matrix(R)\n pr.assert_euler_zyx_equal(e_zyx, e_zyx2)",
"def test_calc_basis_rotation_matrix(time_location, moon_time_location, telescope_frame):\n\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n skycoord=SkyCoord(\n Longitude(12.0 * units.hr), Latitude(-30.0 * units.deg), frame=\"icrs\"\n ),\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n basis_rot_matrix = source._calc_average_rotation_matrix()\n\n assert np.allclose(np.matmul(basis_rot_matrix, basis_rot_matrix.T), np.eye(3))\n assert np.allclose(np.matmul(basis_rot_matrix.T, basis_rot_matrix), np.eye(3))",
"def test_x_y_and_z_rot(self):\n\n axis = Vec3(4, 5, 6)\n # Create a Matrix representing a rotation.\n mat = Matrix44.from_axis_angle_deg(axis, 45.0)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure it matches the expected quaternion.\n expected_quat = Quat.from_axis_angle_deg(axis, 45.0)\n self.assertAlmostEqual(quat.x, expected_quat.x)\n self.assertAlmostEqual(quat.y, expected_quat.y)\n self.assertAlmostEqual(quat.z, expected_quat.z)\n self.assertAlmostEqual(quat.w, expected_quat.w)",
"def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6",
"def test_conversions_matrix_quaternion():\n R = np.eye(3)\n a = pr.axis_angle_from_matrix(R)\n assert_array_almost_equal(a, np.array([1, 0, 0, 0]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q = pr.random_quaternion(random_state)\n R = pr.matrix_from_quaternion(q)\n pr.assert_rotation_matrix(R)\n\n q2 = pr.quaternion_from_matrix(R)\n pr.assert_quaternion_equal(q, q2)\n\n R2 = pr.matrix_from_quaternion(q2)\n assert_array_almost_equal(R, R2)\n pr.assert_rotation_matrix(R2)",
"def test_rotate(self):\n rotable = TestRotable()\n command = RotateCommand(rotable)\n collinear_to_new_direction = rotable.get_direction() + rotable.get_angular_velocity()\n\n command()\n\n ratio = norm(rotable.get_direction()) / norm(collinear_to_new_direction)\n self.assertTrue(allclose(collinear_to_new_direction * ratio, rotable.get_direction()))\n self.assertTrue(isclose(norm(rotable.get_direction()), 1))",
"def random_rotation_matrix(strength=None, dtype=None):\n if strength is None:\n strength = 1.0\n\n if dtype is None:\n dtype = np.float32\n\n x = np.random.rand(3)\n theta = x[0] * 2 * np.pi * strength\n phi = x[1] * 2 * np.pi\n z = x[2] * strength\n\n r = np.sqrt(z)\n V = np.array([np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z)])\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n Rz = np.array([[ct, st, 0], [-st, ct, 0], [0, 0, 1]])\n\n rand_R = (np.outer(V, V) - np.eye(3)).dot(Rz)\n return rand_R.astype(dtype)",
"def test_z_rot(self):\n\n # Create a Matrix representing 90 deg z rot.\n mat = Matrix44.from_rot_z(90)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure the quat matches a 90 degree x rotation.\n expected = Quat.from_axis_angle_deg(Vec3(0, 0, 1), 90)\n AssertQuatAlmostEqual(quat, expected, self)",
"def rotatematrix(m, x, y ,z):\r\n for i in xrange(x):\r\n m = rotatem_x(m)\r\n for i in xrange(y):\r\n m = rotatem_y(m)\r\n for i in xrange(z):\r\n m = rotatem_z(m)\r\n return m"
]
| [
"0.7209717",
"0.70525926",
"0.6995734",
"0.6941755",
"0.6785118",
"0.67178756",
"0.66431063",
"0.6460628",
"0.6438224",
"0.6438224",
"0.6431514",
"0.6425429",
"0.6425429",
"0.6425429",
"0.64090383",
"0.6356306",
"0.6322191",
"0.6319203",
"0.63143456",
"0.62883717",
"0.6263961",
"0.6229099",
"0.6225551",
"0.6218369",
"0.6204671",
"0.61945975",
"0.6174622",
"0.6131859",
"0.61252487",
"0.6123789"
]
| 0.71879506 | 1 |
Return an integer representation based on the given string representation. The Trace.traceLevels hash table is used to do the translation. The integer returned is one of the levels from the Level class. The incoming level is intended to be an integer or a Jython string. | def _coerceLevel(self,level):
if (type(level) == type(0)):
if (level >= Level.NONE and level <= Level.FINEST):
result = level
else:
raise TraceSpecificationException("Unknown integer trace level: %s Valid integer trace levels: %s <= level <= %s" % (level, Level.NONE, Level.FINEST))
#endIf
elif (type(level) == type("") or type(level) == type(u"")):
level = level.lower()
# Need explicit test for None in the if condition because some valid trace
# levels have a value of 0, e.g., none and off.
if (Trace.traceLevels.get(level) == None):
raise TraceSpecificationException("Unknown trace level: %s Valid trace levels: %s" % (level,Trace.traceNames))
else:
result = Trace.traceLevels[level]
#endIf
else:
raise TraceSpecificationException("Unexpected type of trace level, expected either a string or integer.")
#endIf
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _log_level_from_string(string):\n try:\n return getattr(logging, string)\n except AttributeError:\n raise ValueError('invalid log level: %r' % string)",
"def _coerceLevel(self,level):\n result = level\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n level = level.lower()\n result = Trace.traceLevels.get(level)\n # Need an explicit test for None in the following if condition because\n # trace levels \"none\" and \"off\" map to a level with a value of 0\n if (result == None):\n raise TraceLevelException(\"Unknown trace level: %s Valid trace levels: %s\" % (level,Trace.traceNames))\n #endIf\n #endIf\n return result",
"def lvl_name2num(name):\n try:\n levelno = log_levels[name]\n except KeyError:\n logger.error(\"Log level not set: '%s' is not a valid level\", name)\n logger.error(\"Fix the log level configuration\")\n return logging.NOTSET\n else:\n return levelno",
"def logLvl(s):\n return logLevelMap[s.lower()]",
"def _checkLevel(level):\n\n try:\n level = int(level)\n except:\n pass\n\n if isinstance(level, (int, long)):\n rv = level\n elif str(level) == level:\n if level not in logging._levelNames:\n raise ValueError('Unknown level: %r' % level)\n rv = logging._levelNames[level]\n else:\n raise TypeError('Level not an integer or a valid string: %r' % level)\n return rv",
"def get_level(text):\n m = re.search(LEVEL_PATTERN, text)\n if m:\n level = m.groups(0)[0]\n return level\n return UNKNOWN",
"def _log_level(x):\n level_dict = {\n \"DEBUG\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"WARN\": logging.WARN,\n \"WARNING\": logging.WARN,\n \"ERROR\": logging.ERROR,\n \"CRITICAL\": logging.CRITICAL,\n }\n\n if isinstance(x, int):\n return x\n elif isinstance(x, str) and x in level_dict:\n return level_dict[x.upper()]\n else:\n raise ValueError(\"Logging level %s not understood\" % repr(x))",
"def _get_level_number(self, level: Union[int, Name]) -> int:\n count = self.names.count(level)\n if (count > 1) and not isinstance(level, int):\n raise ValueError(\"The name %s occurs multiple times, use a level number\" % level)\n if level in self.names:\n level = self.names.index(level)\n elif isinstance(level, int):\n nlevels = self.nlevels\n if level >= nlevels:\n raise IndexError(\n \"Too many levels: Index has only %d \"\n \"levels, %d is not a valid level number\" % (nlevels, level)\n )\n if level < 0:\n if (level + nlevels) < 0:\n raise IndexError(\n \"Too many levels: Index has only %d levels, \"\n \"not %d\" % (nlevels, level + 1)\n )\n level = level + nlevels\n else:\n raise KeyError(\"Level %s not found\" % str(level))\n\n return level",
"def parse_log_level_flag(level):\n log_level = getattr(LogLevel, level.upper().replace(\"-\", \"_\"), None)\n if type(log_level) == int:\n return log_level\n\n try:\n return int(level)\n except ValueError:\n level_names = sorted(LogLevel._asdict().keys())\n raise Error('Invalid logging-level %r. Use one of %s or an integer.'\n % (level, ', '.join(level_names)))",
"def getLevel(levelName, no_match=logging.NOTSET):\n# strict={'case': False, 'type': False, 'map': False},\n# fixup=False\n try:\n result = logging._nameToLevel.get(levelName)\n if result is not None:\n return result\n\n return int(levelName)\n\n except ValueError:\n if raiseExceptions:\n raise(\"parameter 'levelName' must be a defined String\")\n\n return no_match",
"def from_string(string: str) -> int:\n if string == Definitions.get_value(CaseType.SAMPLE_STRING_KEY):\n return CaseType.SAMPLE\n elif string == Definitions.get_value(CaseType.CORNER_STRING_KEY):\n return CaseType.CORNER_CASE\n else:\n return CaseType.GENERATED",
"def name2level(level_name: str) -> int:\n level_name = level_name.upper()\n level = logging._nameToLevel.get(level_name)\n if level is None:\n raise ConfigError(\n \"logging module doesn't support this level {}\".format(level_name)\n )\n return level",
"def _get_log_level(log_level_string):\n log_level = ''\n if log_level_string == 'DEBUG':\n log_level = logging.DEBUG\n elif log_level_string == 'INFO':\n log_level = logging.INFO\n elif log_level_string == 'WARNING':\n log_level = logging.WARNING\n elif log_level_string == 'ERROR':\n log_level = logging.ERROR\n elif log_level_string == 'CRITICAL':\n log_level = logging.CRITICAL\n else:\n raise Exception(f'Log level {log_level_string} is invalid')\n return log_level",
"def _level(self, level):\r\n\r\n level_t = type(level)\r\n if level_t == int: return level\r\n if level == None: return level\r\n if level == \"SILENT\": return log.SILENT\r\n if hasattr(logging, \"_checkLevel\"):\r\n return logging._checkLevel(level)\r\n return logging.getLevelName(level)",
"def mlevel(level: Union[int, str]) -> int:\n try:\n if not level:\n raise KeyError\n return LOG_LEVELS[str(level).upper()]\n except KeyError:\n unique_set = set(\n [str(x) for x in list(LOG_LEVELS.keys())]\n + [str(x) for x in list(LOG_LEVELS.values())]\n )\n opts: str = \", \".join(sorted(list(unique_set)))\n raise ValueError(f\"Invalid log level: {level}. Available options: {opts}\")",
"def which_level(label):\n if not isinstance(label, basestring):\n raise ValueError(\"String expected\")\n\n label = label.lower()\n if label.startswith('impute'):\n level = 'imputing'\n elif label in ('recenter', 'standardize', 'normalize', 'minmax'):\n level = 'preproc'\n elif label in ('pca', 'incrementalpca', 'randomizedpca', 'kernelpca',\n 'isomap', 'lle', 'se', 'mds', 'tsne', 'rbm'):\n level = 'dimred'\n elif label in ('kmeans', 'ap', 'ms', 'spectral',\n 'hierarchical'):\n level = 'clustering'\n else:\n level = 'None'\n return level",
"def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1",
"def get_level(tag: str) -> int:\n return TAG_LEVELS[tag]",
"def level(self) -> pulumi.Input[Union[str, 'Level']]:\n return pulumi.get(self, \"level\")",
"def ParseLogLevelFlag(level):\n log_level = getattr(LogLevel, level.upper(), None)\n if type(log_level) == int:\n return log_level\n\n try:\n return int(level)\n except ValueError:\n level_names = sorted(LogLevel._asdict().keys())\n raise Error('Invalid logging-level %r. Use one of %s or an integer.'\n % (level, ', '.join(level_names)))",
"def level_to_index(level: int) -> int:\n\n level_data = {\n 15: 1,\n 25: 2,\n 30: 3,\n 35: 4,\n 40: 5,\n 45: 6,\n 50: 7,\n }\n return level_data[level]",
"def stringToInt(*args):\n return _libsbml.SBO_stringToInt(*args)",
"def check_level(ctx, param, value):\n try:\n return int(value)\n except ValueError:\n return value.upper()",
"def get_level(k):\r\n return int(log2(k))",
"def read_level(self):\n current_level = 1\n\n try:\n if self.store.exists(LEVEL_STORE):\n current_level_str = self.store.get(LEVEL_STORE)['level']\n current_level = int(current_level_str)\n except:\n print 'Exception when reading Galaxy run level from JSON file!'\n current_level = 1\n\n return current_level",
"def level(self, obj):\n if isinstance(obj, compat.string_type):\n if obj not in self._levels:\n raise KeyError(\"No level %s in dimension %s\" %\n (obj, self.name))\n return self._levels[obj]\n elif isinstance(obj, Level):\n return obj\n else:\n raise ValueError(\"Unknown level object %s (should be a string \"\n \"or Level)\" % obj)",
"def convert_label_string2num(label, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n if num_types==4:\n label = label.replace('Implicit_', '')\n label = label.replace('Explicit_', '')\n return all_labels.index(label)",
"def get_log_level(level):\n levels = logbook.base._reverse_level_names\n try:\n new_level = levels[level]\n except KeyError:\n log_logger.error((f'Requested level [\\'{level}\\'] is not valid.'\n f' Valid levels are {list(levels.keys())}.'))\n return\n return new_level",
"def _get_level(value, levels, prefix=None):\r\n\r\n if value > 1 or value < 0:\r\n raise ValueError(\"Encountered invalid normalized alpha diversity value %s. \"\r\n \"Normalized values must be between 0 and 1.\" % value)\r\n\r\n check = [i for i in range(0, len(levels)) if levels[i] == value]\r\n\r\n # apply a special rule for the values that are equal to an edge\r\n if len(check):\r\n value_level = check[0] + 2\r\n # if it is not a special case just use searchsorted\r\n else:\r\n value_level = searchsorted(levels, value) + 1\r\n\r\n if prefix is not None:\r\n output = '{0}_{1}_of_{2}'.format(prefix, value_level, len(levels) + 1)\r\n else:\r\n output = value_level\r\n\r\n return output",
"def map_level(level):\n if level >= logging.ERROR:\n return 'error'\n elif level >= logging.WARNING:\n return 'warn'\n elif level >= logging.INFO:\n return 'info'\n return ''"
]
| [
"0.6459587",
"0.6423294",
"0.6329741",
"0.6158966",
"0.6141229",
"0.6050959",
"0.58506906",
"0.5825516",
"0.5792962",
"0.5753301",
"0.5750161",
"0.57239944",
"0.5698036",
"0.56045485",
"0.55975974",
"0.5583622",
"0.5563788",
"0.5537052",
"0.55249166",
"0.54837537",
"0.5475534",
"0.5475103",
"0.54689825",
"0.54077214",
"0.53948057",
"0.5390005",
"0.5383035",
"0.5370575",
"0.5310579",
"0.5303872"
]
| 0.6949068 | 0 |
Return a Python/Jython regular expression string that represents the given pattern. | def _patternToRegEx(self,pattern):
if (pattern == "*"):
# special case that matches anything
regex = ".*?"
else:
regex = pattern
if (regex.find(".") >= 0):
regex = regex.replace(".", "\.")
#endIf
asteriskIndex = regex.find("*")
if (asteriskIndex < 0):
# no wildcard in pattern
regex = "%s$" % regex
elif (asteriskIndex + 1 != len(regex)):
raise TraceSpecificationException("Invalid entity pattern: %s. A wildcard character may only be used to terminate a pattern." % pattern)
else:
# remove * and add ".*?"
regex = "%s.*?" % regex[:-1]
#endIf
#endIf
return regex | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_regex(pattern:str) -> str:\n raise NotImplementedError()",
"def _format_pattern(pattern: str) -> str:\n return pattern.rstrip('*') + '**'",
"def get_regex_format(self, case_sensitive=True):\n\n if case_sensitive is True:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=true')\n elif case_sensitive is False:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=false')\n elif case_sensitive is None:\n pass\n else:\n raise errors.UnknownCaseSensitiveError(value=case_sensitive)\n\n return \"{target:s} REGEXP {pattern:s}\"",
"def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex",
"def regex2js(pattern):\r\n # TODO: costruire un buon risolutore di espressioni regolari per javascript\r\n return pattern",
"def re_format(self):\n return self._re.pattern",
"def translate(self, pattern):\n\n if not pattern:\n return re.compile('')\n\n # Express windows, mac patterns in unix patterns.\n pattern = os.path.normcase(pattern).replace(os.sep, \"/\")\n\n # If pattern contains '/' it should match from the start.\n temp = pattern\n if pattern[0] == \"/\":\n pattern = pattern[1:]\n if temp[-1] == \"/\":\n temp = temp[:-1]\n\n # Convert pattern rules: ** * ? to regexp rules.\n pattern = re.escape(pattern)\n pattern = pattern.replace(\"\\\\?\", \"[^/]\")\n pattern = pattern.replace(\"\\\\*\\\\*\", \".*\")\n pattern = pattern.replace(\"\\\\*\", \"[^/]*\")\n pattern = pattern.replace(\"\\\\*\", \".*\")\n\n if \"/\" in temp:\n # If pattern contains '/' it should match from the start.\n pattern = \"^\\\\/\" + pattern\n else:\n # Else the pattern should match the all file or folder name.\n pattern = \"\\\\/\" + pattern\n\n if pattern[-2:] == \"\\\\/\":\n # Folder patterns should match also files (MP specific).\n pattern = pattern + \".*\"\n\n # (MP: not used because it is file-based)\n #if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\n # File patterns should match also folders.\n #pattern = pattern + \"\\\\/?\"\n\n # Pattern should match till the end.\n pattern = pattern + \"$\"\n return re.compile(pattern, re.S)",
"def convert_pattern(pattern, pattern_type=None):\n\tif pattern_type == 'regex':\n\t\treturn re.compile(pattern)\n\telif pattern_type == 'wildcard':\n\t\treturn re.compile(fnmatch.translate(pattern))\n\treturn re.compile(re.escape(pattern))",
"def create_pattern_function(self):\n\n type_regex = \"(?:\\w+(?:\\:\\:)?)+\"\n regex = \"^(?P<indent>\\s*)(?P<virtual>virtual )?(?P<function_return>(?:const )?\" + type_regex + \"(?P<subtype><?\" + type_regex + \">?)?) (?P<function_name>.*)\\((?P<args>.*)\\)(?P<const_qualifier> const)?(?: = 0)?;\\n$\"\n return regex",
"def formatPattern(self, pat):\n\n if not pat:\n return ''\n else:\n return pat",
"def pattern_to_regex(pattern):\n\n pattern = pattern.replace('.', r'\\.')\n pattern = pattern.replace('?', r'.')\n pattern = pattern.replace('*', r'.*')\n\n if pattern.endswith('/'):\n pattern += r'.*'\n elif pattern.endswith('.*'):\n pattern = pattern[:-2]\n pattern += r'(?!.*?/.*?)'\n\n return pattern",
"def pattern(self) -> str:\n return pulumi.get(self, \"pattern\")",
"def pattern(self) -> str:\n return pulumi.get(self, \"pattern\")",
"def pattern(self) -> str:\n return pulumi.get(self, \"pattern\")",
"def regexp(self, pattern):\r\n match = pattern.match(self.text, self.cur)\r\n if match is not None:\r\n return match.group()",
"def args_str(self):\n return repr(self._regex.pattern)",
"def compiler(pattern):\n if hasattr(pattern, 'pattern'):\n return pattern\n import re\n try:\n if case_sensitive:\n comped = re.compile(pattern)\n else:\n comped = re.compile(pattern, re.IGNORECASE)\n return comped\n except:\n import traceback\n import sys\n from time import localtime, strftime\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value, exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print('%s: Query %s' % (thetime, error_message))\n if root:\n return 'Bad query'\n else:\n raise ValueError('%s: Query %s' % (thetime, error_message))",
"def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. /api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names",
"def _regexify_matching_pattern(rule_pattern: str, wildcard_optional=False) -> str:\n return rule_pattern.replace(\"*\", f\"(.{'+*'[wildcard_optional]})\")",
"def pattern_gen():\n pattern = \"\"\n\n return pattern",
"def _compile_fnmatch(pattern: str) -> re.Pattern:\n return re.compile(translate(pattern))",
"def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match",
"def AsRegEx(self):\n parts = _REGEX_SPLIT_PATTERN.split(self._value)\n result = u\"\".join(self._ReplaceRegExPart(p) for p in parts)\n\n return rdf_standard.RegularExpression(u\"(?i)\\\\A%s\\\\Z\" % result)",
"def parse_pattern(s: str) -> str:\n # Escape regex metacharacters\n for c in [\"\\\\\", \".\", \"(\", \")\", \"[\", \"]\", \"^\", \"$\", \"*\", \"+\", \"?\", \"|\"]:\n s = s.replace(c, \"\\\\\" + c)\n\n s = re.sub(\"~+\", \".*\", s)\n s = \"^\" + s + \"$\"\n return s",
"def _regex_from_encoded_pattern(s):\r\n if s.startswith('/') and s.rfind('/') != 0:\r\n # Parse it: /PATTERN/FLAGS\r\n idx = s.rfind('/')\r\n pattern, flags_str = s[1:idx], s[idx+1:]\r\n flag_from_char = {\r\n \"i\": re.IGNORECASE,\r\n \"l\": re.LOCALE,\r\n \"s\": re.DOTALL,\r\n \"m\": re.MULTILINE,\r\n \"u\": re.UNICODE,\r\n }\r\n flags = 0\r\n for char in flags_str:\r\n try:\r\n flags |= flag_from_char[char]\r\n except KeyError:\r\n raise ValueError(\"unsupported regex flag: '%s' in '%s' \"\r\n \"(must be one of '%s')\"\r\n % (char, s, ''.join(list(flag_from_char.keys()))))\r\n return re.compile(s[1:idx], flags)\r\n else: # not an encoded regex\r\n return re.compile(re.escape(s))",
"def get_regex_pattern():\n # The regex checks if the content of files contain a comment of\n # form => /** {charachters | newline} */ ,\n # file may also contain any number of \n # newlines or charachters before or after the comment.\n # To analyse the regex visit https://regex101.com/ and paste the regex\n regex = '^(.|\\n)*(\\\\/\\\\*\\\\*(.|\\n)*\\\\*\\\\/)+(.|\\n)*$'\n pattern = re.compile(regex)\n return pattern",
"def _MakeRE(regex_str):\n return re.compile(regex_str.format(**SHORTHAND))",
"def make_pattern(current_pattern):\n pattern = ''.join([str(b) for b in current_pattern])\n return pattern",
"def matching_regex_pattern(self):\n if not self._pattern:\n # Match one or more words separated by whitespace\n word = \"[a-zA-Z0-9?,\\.\\-_!;:']+\"\n regex = \"(\\s+%s)+\" % word\n self._pattern = re.compile(regex)\n return self._pattern",
"def build_regex(self) -> typing.Pattern:\n self._regex = re.compile(\"|\".join(sorted(self._includes)))\n return self._regex"
]
| [
"0.7571252",
"0.6932767",
"0.68578047",
"0.67705536",
"0.6753435",
"0.67393273",
"0.669281",
"0.6593594",
"0.64645416",
"0.64624697",
"0.64510584",
"0.6398081",
"0.6398081",
"0.6398081",
"0.6377575",
"0.63312477",
"0.632012",
"0.63149226",
"0.6304338",
"0.62888813",
"0.6284583",
"0.62588364",
"0.6249097",
"0.62151307",
"0.61717784",
"0.61667395",
"0.6164652",
"0.61475563",
"0.6128731",
"0.61035687"
]
| 0.6962399 | 1 |
Return "true" if the given trace level is a valid string or integer representation of a trace level. | def _isTraceLevel(self,level):
if (type(level) == type(0)):
result = level >= Level.NONE and level <= Level.FINEST
elif (type(level) == type("") or type(level) == type(u"")):
level = level.lower()
validLevel = Trace.traceLevels.get(level)
# Keep in mind, trace level "none" maps to Level.NONE which has the value of 0
result = validLevel != None
else:
# level can only be an int or str
result = 0
#endIf
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _checkLevel(level):\n\n try:\n level = int(level)\n except:\n pass\n\n if isinstance(level, (int, long)):\n rv = level\n elif str(level) == level:\n if level not in logging._levelNames:\n raise ValueError('Unknown level: %r' % level)\n rv = logging._levelNames[level]\n else:\n raise TypeError('Level not an integer or a valid string: %r' % level)\n return rv",
"def _coerceLevel(self,level):\n if (type(level) == type(0)):\n if (level >= Level.NONE and level <= Level.FINEST):\n result = level\n else:\n raise TraceSpecificationException(\"Unknown integer trace level: %s Valid integer trace levels: %s <= level <= %s\" % (level, Level.NONE, Level.FINEST))\n #endIf\n elif (type(level) == type(\"\") or type(level) == type(u\"\")):\n level = level.lower()\n # Need explicit test for None in the if condition because some valid trace\n # levels have a value of 0, e.g., none and off.\n if (Trace.traceLevels.get(level) == None):\n raise TraceSpecificationException(\"Unknown trace level: %s Valid trace levels: %s\" % (level,Trace.traceNames))\n else:\n result = Trace.traceLevels[level]\n #endIf\n else:\n raise TraceSpecificationException(\"Unexpected type of trace level, expected either a string or integer.\")\n #endIf\n return result",
"def setTraceLevel (self,level):\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n if (level):\n level = self._coerceLevel(level)\n self.traceLevel = level\n #endIf\n elif (type(level) == type(0)):\n if (self._isTraceLevel(level)):\n self.traceLevel = level\n else:\n # level is a number but not in the range of a trace level.\n raise TraceLevelException(\"Invalid trace level: %s Valid trace levels are defined by the Level class.\" % level)\n #endIf\n else:\n # Odd case where level is unexpected type\n raise TraceLevelException(\"Trace level must be either a string or an integer. Use levels defined by the Level class.\")\n #endIf",
"def _coerceLevel(self,level):\n result = level\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n level = level.lower()\n result = Trace.traceLevels.get(level)\n # Need an explicit test for None in the following if condition because\n # trace levels \"none\" and \"off\" map to a level with a value of 0\n if (result == None):\n raise TraceLevelException(\"Unknown trace level: %s Valid trace levels: %s\" % (level,Trace.traceNames))\n #endIf\n #endIf\n return result",
"def parse_log_level_flag(level):\n log_level = getattr(LogLevel, level.upper().replace(\"-\", \"_\"), None)\n if type(log_level) == int:\n return log_level\n\n try:\n return int(level)\n except ValueError:\n level_names = sorted(LogLevel._asdict().keys())\n raise Error('Invalid logging-level %r. Use one of %s or an integer.'\n % (level, ', '.join(level_names)))",
"def __is_int(self,string):\r\n try: \r\n int(string)\r\n return True\r\n except ValueError:\r\n return False",
"def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False",
"def is_int_like(val):\n try:\n return str(int(val)) == str(val)\n except Exception:\n return False",
"def is_int_like(val):\n try:\n return str(int(val)) == str(val)\n except Exception:\n return False",
"def ParseLogLevelFlag(level):\n log_level = getattr(LogLevel, level.upper(), None)\n if type(log_level) == int:\n return log_level\n\n try:\n return int(level)\n except ValueError:\n level_names = sorted(LogLevel._asdict().keys())\n raise Error('Invalid logging-level %r. Use one of %s or an integer.'\n % (level, ', '.join(level_names)))",
"def is_int(string:str) -> bool:\n try:\n int(string)\n return True\n except:\n return False",
"def is_some_number(mystring):\n # print(Bcolors.cyan + re.findall(r\".*\\\\(.*)\", inspect.stack()[0][1])[0] + \" --- \"\n # + inspect.stack()[0][3] + \"()\" + Bcolors.ENDC)\n mystring = str(mystring)\n mystring = re.sub(\",\", \".\", mystring)\n try:\n if float(mystring):\n return True\n except ValueError:\n return False",
"def represents_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False",
"def is_int(string):\n try:\n int(string)\n return True\n except ValueError:\n return False",
"def is_on(self, level):\n\n return self.log_level >= level",
"def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False",
"def is_level(self, state):\n \n logging.info('checking state '+state+' against self '+str(self.state))\n result = False\n if('up' == state):\n result = (self.state == 255)\n elif('down' == state):\n result = (self.state == 0)\n elif(state.isdigit()):\n state = int(state)\n result = (abs(self.state - int(255*state/100)) < 2)\n return result",
"def is_positive_integer(string:str) -> bool:\n try:\n value = int(string)\n return value >= 0\n except ValueError:\n return False",
"def isInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False",
"def _isint(string, inttype=int):\n return (\n type(string) is inttype\n or isinstance(string, (bytes, str))\n and _isconvertible(inttype, string)\n )",
"def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False",
"def is_int(self, val):\n try:\n int(val)\n return True\n except ValueError:\n return False",
"def is_int(value):\n try:\n int(value)\n except ValueError:\n return False\n else:\n return True",
"def _validate_level(self, levelText):\n if len([line for line in levelText.splitlines() if line.strip()]) != 6:\n # wrong num rows\n return False\n \n if any(len(list(line)) != 6 for line in levelText.splitlines() if line.strip()):\n # wrong num cols\n return False\n\n return True",
"def is_serious(self, level=ERROR):\n return self.level >= level",
"def is_valid_case_type(case_type):\n return bool(_case_type_regex.match(case_type or ''))",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False",
"def isindex(str):\n try:\n int(str)\n return True\n except ValueError:\n return False"
]
| [
"0.64003354",
"0.63713634",
"0.6259688",
"0.6041013",
"0.5832999",
"0.568361",
"0.5626215",
"0.5560428",
"0.5560428",
"0.5522911",
"0.55040485",
"0.54773974",
"0.54651505",
"0.5385012",
"0.5354778",
"0.53337514",
"0.53284377",
"0.53162473",
"0.5291516",
"0.5280502",
"0.52439964",
"0.521524",
"0.52098066",
"0.5199637",
"0.5195133",
"0.51880884",
"0.5186576",
"0.5186576",
"0.5182173",
"0.51701"
]
| 0.8185287 | 0 |
Set the trace level for this instance of the trace class based on the Trace class traceSpec. If there is no trace spec that has a module pattern that matches this trace instance module name, then the trace level is not modified. | def configureThisTrace(self):
for spec in Trace.traceSpec:
if (spec.compiledRegex.match(self.entityName)):
self.traceLevel = spec.level
break
#endIf
#endFor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configureTrace(traceString):\n \n setTraceSpec(traceString)\n registeredModules = Trace.tracedEntities.keys()\n for module in registeredModules:\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(module)):\n trace = Trace.tracedEntities[module]\n trace.setTraceLevel(spec.level)\n break\n #endIf\n #endFor\n #endFor",
"def setTraceLevel (self,level):\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n if (level):\n level = self._coerceLevel(level)\n self.traceLevel = level\n #endIf\n elif (type(level) == type(0)):\n if (self._isTraceLevel(level)):\n self.traceLevel = level\n else:\n # level is a number but not in the range of a trace level.\n raise TraceLevelException(\"Invalid trace level: %s Valid trace levels are defined by the Level class.\" % level)\n #endIf\n else:\n # Odd case where level is unexpected type\n raise TraceLevelException(\"Trace level must be either a string or an integer. Use levels defined by the Level class.\")\n #endIf",
"def setTraceSpec(traceString):\n \n if (not traceString):\n raise Exception(\"The traceString argument must be a non-empty string.\")\n #endIf\n \n Trace.traceSpec = parseTraceString(traceString)\n Trace.traceString = traceString",
"def setlevel(self, lvl):\n self.logger.setLevel(lvl)",
"def test_set_subsystem_logger_level(self):\n pass",
"def set_level(self, level_name):\n\n self.current_level = level_name",
"def set_level(self, level: LogLevel):\n pass",
"def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L",
"def setLevel(self, level):\n self.level = level",
"def setLevel(self, level):\n self.lvl = level",
"def set_logger_level(lgr, level):\n if isinstance(level, int):\n pass\n elif level.isnumeric():\n level = int(level)\n elif level.isalpha():\n level = getattr(logging, level)\n else:\n lgr.warning(\"Do not know how to treat loglevel %s\" % level)\n return\n lgr.setLevel(level)",
"def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)",
"def __change_level(self, level):\n self.level = level",
"def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n\n self._level = level",
"def set_log_level(params, logger):\n level = params.get(\"Level\")\n\n levels = {\n \"CRITICAL\": logging.CRITICAL,\n \"ERROR\" : logging.ERROR,\n \"WARNING\" : logging.WARNING,\n \"INFO\" : logging.INFO,\n \"DEBUG\" : logging.DEBUG,\n \"NOTSET\" : logging.NOTSET\n }\n\n if level:\n logger.setLevel(levels.get(level, logging.NOTSET))",
"def set_log_level(self, level):\n if level == 'info':\n level = logging.INFO\n if level == 'debug':\n level = logging.DEBUG\n if level == 'error':\n level = logging.ERROR\n self._log.setLevel(level)",
"def set_threshold_levels(self, event_name, val):\n if self.validate_supply_name(event_name, \"events/\") and val:\n self.console.runcmd(f\"echo {val} > events/{event_name}\")\n else:\n assert (\n False\n ), \"A valid event name or the value, is not given while setting levels\"",
"def trace_set_format(self, fmt):\n cmd = enums.JLinkTraceCommand.SET_FORMAT\n data = ctypes.c_uint32(fmt)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to set trace format.')\n return None",
"def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL",
"def set_level(self, level: str):\n self._logger.setLevel(getattr(logging, level))",
"def patch_traces(self, traces, project_id=None, options=None):\n if project_id is None:\n project_id = self.project\n\n self.trace_api.patch_traces(\n project_id=project_id,\n traces=traces,\n options=options)",
"def set_log_level_package(\n self,\n value: t.Union[str, int] = LOG_LEVEL_PACKAGE,\n ) -> None:\n logs.set_log_level(obj=self.LOG_LOGGER, level=value)",
"def set_level(self, debug_level, verbose=False):\n self.debug_level = debug_level\n self.verbosity = verbose\n level = logging.INFO\n if debug_level > 4:\n level = logging.DEBUG - 3\n elif debug_level > 0:\n level = logging.DEBUG - debug_level + 1\n elif verbose:\n level = logging.INFO - 1\n self.mylog.setLevel(level)\n self.handler.setLevel(level)",
"def set_level(log_or_name, level):\n if isinstance(log_or_name, str):\n log = get_logger(log_or_name)\n else:\n log = log_or_name\n log.setLevel(level)\n for handler in log.handlers:\n handler.setLevel(level)",
"def level(self, L):\n assert isinstance(L, level)\n self.__level = L",
"def set_logging_level(self, level):\n return self.sdk.set_logging_level(\n level,\n prefix=self.__class__.__name__,\n )",
"def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)"
]
| [
"0.61845666",
"0.5965638",
"0.58020526",
"0.55751276",
"0.55380774",
"0.5459781",
"0.54203385",
"0.53951794",
"0.53279537",
"0.5266613",
"0.52020276",
"0.5173878",
"0.51630473",
"0.51576376",
"0.50914013",
"0.50914013",
"0.50914013",
"0.5078109",
"0.5073327",
"0.50539666",
"0.50448984",
"0.50410056",
"0.50371313",
"0.5028212",
"0.50192285",
"0.50166047",
"0.5011227",
"0.50070345",
"0.4983",
"0.49788493"
]
| 0.65092444 | 0 |
Return a string useable for output to stdout or a log file that provides a representation of the "exception stack" and the "frame stack" from "top to bottom" (TTB). The "exception stack" captures the code tree from main to where the exception was raised and is usually the most interesting part of the stack. The "frame stack" captures the code from the point to where the exception was caught. Displaying the stack from top to bottom in an output log or stdout is the style in which Java displays the stack. There is another method named _exceptionStackBTT() that can be used to create a string that represents the execution stack from bottom to top, which is the style that Jython/Python uses by default. | def _exceptionStackTTB(self,methodName,exc,depth=10):
stack = ""
# Reconstruct the call stack from where the trace of the exception was initiated by invoking
# Trace.error() or Trace.severe().
stackList = traceback.extract_stack()
try:
for stackData in stackList:
sourcefile,line,function,text = stackData
if (sourcefile.endswith("Trace.py") and (function == "error" or function == "severe")): break
sepIndex = sourcefile.rfind(os.sep)
if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):
sourcefile = sourcefile[sepIndex+1:]
#endIf
if (text == None):
if (not stack):
# Leave out the newline for the bottom line on the stack
stack = "\t%s(%s) [%s]" % (sourcefile,line,function)
else:
stack = "\t%s(%s) [%s]\n%s" % (sourcefile,line,function,stack)
#endIf
else:
if (not stack):
# Leave out the newline for the bottom line on the stack
stack = "\t%s(%s) [%s] - %s" % (sourcefile,line,function,text)
else:
stack = "\t%s(%s) [%s] - %s\n%s" % (sourcefile,line,function,text,stack)
#endIf
#endIf
#endFor
stack = "\tFrame stack (most recent call first):\n%s" % stack
except:
# This shouldn't happen, but in case it does...
exc_type,exc_value = sys.exc_info()[:2]
stack = "\tException getting frame stack. Type: %s, Value: %s\n%s" % (exc_type,exc_value,stack)
#endTry
try:
tb = sys.exc_info()[2]
stackList = traceback.extract_tb(tb,depth)
for stackData in stackList:
sourcefile,line,function,text = stackData
sepIndex = sourcefile.rfind(os.sep)
if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):
sourcefile = sourcefile[sepIndex+1:]
#endIf
if (text == None):
stack = "\t%s(%s) [%s]\n%s" % (sourcefile,line,function,stack)
else:
stack = "\t%s(%s) [%s] - %s\n%s" % (sourcefile,line,function,text,stack)
#endIf
#endFor
stack = "\tException stack (most recent call first):\n%s" % stack
except:
# This shouldn't happen, but in case it does...
exc_type,exc_value = sys.exc_info()[:2]
stack = "\tException getting exception stack. Type: %s, Value: %s\n%s" % (exc_type,exc_value,stack)
#endTry
# At the very top - put the exception string
stack = "\t%s\n%s" % (exc,stack)
return stack | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack",
"def get_traceback_stxt():\n #/\n exc_cls, exc_obj, tb_obj = sys.exc_info()\n\n #/\n txt_s = traceback.format_exception(exc_cls, exc_obj, tb_obj)\n\n #/\n res = ''.join(txt_s)\n\n return res",
"def get_exception():\n trace = ''\n exception = ''\n exc_list = traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)",
"def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def exception_to_string(excp: Exception) -> str:\n stack = traceback.extract_stack()[:-3] + traceback.extract_tb(\n excp.__traceback__\n ) # add limit=??\n pretty = traceback.format_list(stack)\n return \"\".join(pretty) + f\"\\n {excp.__class__} {excp}\"",
"def dump_stacks(self):\n\n dump = []\n\n # threads\n threads = dict([(th.ident, th.name) for th in threading.enumerate()])\n\n for thread, frame in sys._current_frames().items():\n if thread not in threads:\n continue\n dump.append(\"Thread 0x%x (%s)\\n\" % (thread, threads[thread]))\n dump.append(\"\".join(traceback.format_stack(frame)))\n dump.append(\"\\n\")\n\n return \"\".join(dump)",
"def formatException(cls, instance, trcback, context=1):\n\n\tstack = extractStack(getInnerMostFrame(trcback), context=context)\n\toutput = []\n\toutput.append(\"Traceback (most recent call last):\")\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\toutput.append(\" File \\\"{0}\\\", line {1}, in {2}\".format(fileName, lineNumber, name))\n\t\tfor line in context:\n\t\t\toutput.append(\" {0}\".format(line.strip()))\n\tfor line in traceback.format_exception_only(cls, instance):\n\t\toutput.append(\"{0}\".format(line))\n\treturn output",
"def fancy_traceback(exc: Exception) -> str:\n text = \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n return f\"```py\\n{text[-4086:]}\\n```\"",
"def getStackString(self):\n return \"\".join(self.stack[:0:-1])",
"def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def exception_stacktrace(self):\n # type: () -> list[string_types]\n return self._exception_stacktrace",
"def traceback(self):\r\n clean = self.raw_traceback\r\n lines = ['Traceback (most recent call last):\\n']\r\n lines += traceback.format_list(clean)\r\n msg = str(self.error)\r\n lines += traceback.format_exception_only(self.exc_info[0], msg)\r\n return ''.join(lines)[:-1]",
"def format_stack_trace(exc_info):\n if exc_info[0] is None:\n return ''\n lines = traceback.format_exception(*exc_info)\n return ''.join(line for line in lines)",
"def DumpStackTracebacks():\n results = []\n id_name_map = {}\n for thread in threading.enumerate():\n id_name_map[thread.ident] = thread.name\n\n results.append(\n '*****\\n'\n '*\\n'\n '* Dumping debug information.\\n'\n '*\\n'\n '*****\\n')\n # pylint: disable=protected-access\n for thread_id, stack in sys._current_frames().items():\n results.append('Thread %s (id=%d):\\n' %\n (id_name_map.get(thread_id, 'unnamed-%d' % thread_id),\n thread_id))\n for filename, line_no, function_name, text in (\n traceback.extract_stack(stack)):\n # Same format as the usual Python stack trace, but indented\n # twice\n results.append(' File: \"%s\", line %d, in %s\\n' % (\n filename, line_no, function_name))\n if text:\n results.append(' %s\\n' % text.strip())\n\n results.append('***** End of debug information.\\n')\n\n return ''.join(results)",
"def last_exception():\n exc_type, exc_value, exc_traceback = sys.exc_info()\n return ''.join(traceback.format_exception(exc_type, exc_value,\n exc_traceback))",
"def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')",
"def LastStackTrace():\n # Temporarily redirect traceback from STDOUT into a buffer.\n trace_buf = Buffer()\n old_stdout = sys.stdout\n sys.stdout = trace_buf\n\n try:\n traceback.print_exc(file=sys.stdout)\n except AttributeError:\n # No exception for traceback exist.\n print ''\n\n # Restore STDOUT.\n sys.stdout = old_stdout\n\n return trace_buf.GetBufferAsStr().strip()",
"def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)",
"def tb_log_str(exception) -> str:\n return \"\".join(traceback.format_exception(None, exception, exception.__traceback__))",
"def dump_stacktraces():\n lines = []\n for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212\n lines.append(\"\\n######### ProcessID=%s, ThreadID=%s #########\" % (\n os.getpid(), thread_id\n ))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n lines.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n lines.append(\" %s\" % (line.strip()))\n lines.append(\"#############################################\\n\\n\")\n\n print('\\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)",
"def formatException(self, exc_info):\n traces = traceback.format_exception(*exc_info)\n return \"\\n\".join(traces)",
"def FormatExceptionOnly():\n return '\\n'.join(\n traceback.format_exception_only(*sys.exc_info()[:2])).strip()",
"def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)",
"def _exc_info_to_string(self, err, test):\r\n exctype, value, tb = err\r\n # Skip test runner traceback levels\r\n while tb and self._is_relevant_tb_level(tb):\r\n tb = tb.tb_next\r\n if exctype is test.failureException:\r\n # Skip assert*() traceback levels\r\n length = self._count_relevant_tb_levels(tb)\r\n msgLines = traceback.format_exception(exctype, value, tb, length)\r\n else:\r\n msgLines = traceback.format_exception(exctype, value, tb)\r\n \r\n if self.buffer:\r\n output = sys.stdout.getvalue()\r\n error = sys.stderr.getvalue() \r\n if output:\r\n if not output.endswith('\\n'):\r\n output += '\\n'\r\n msgLines.append(STDOUT_LINE % output)\r\n if error:\r\n if not error.endswith('\\n'):\r\n error += '\\n'\r\n msgLines.append(STDERR_LINE % error)\r\n return ''.join(msgLines)",
"def print_exc_plus(tb):\n while 1:\n if not tb.tb_next:\n break\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print(\"Locals by frame, innermost last\")\n for frame in stack:\n print()\n print(\"Frame %s in %s at line %s\" % (frame.f_code.co_name,\n frame.f_code.co_filename,\n frame.f_lineno))\n for key, value in frame.f_locals.items():\n print(\"\\t%20s = \" % key,)\n #We have to be careful not to cause a new error in our error\n #printer! Calling str() on an unknown object could cause an\n #error we don't want.\n try:\n print(value)\n except:\n print(\"<ERROR WHILE PRINTING VALUE>\")",
"def _exc_info_to_string(self, err, test):\n\t\texctype, value, tb = err\n\t\t# Skip test runner traceback levels\n\t\twhile tb and self._is_relevant_tb_level(tb):\n\t\t\ttb = tb.tb_next\n\n\t\tif exctype is test.failureException:\n\t\t\t# Skip assert*() traceback levels\n\t\t\tlength = self._count_relevant_tb_levels(tb)\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb, length)\n\t\telse:\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb)\t\t\n\t\treturn ''.join(msgLines)",
"def print_exc_plus():\n tb = sys.exc_info()[2]\n while tb.tb_next:\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print \"Locals by frame, innermost last\"\n for frame in stack:\n print\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)\n for key, value in frame.f_locals.items():\n print \"\\t%20s = \" % key,\n try: print value\n except: print \"<ERROR WHILE PRINT VALUE>\"",
"def log_stack(self, msg):\n\n log(('%s\\n' % msg) + ''.join(traceback.format_list(traceback.extract_stack())))",
"def tidy_error(ex=None) -> str:\r\n from sys import exc_info\r\n from os.path import join, abspath, dirname\r\n from traceback import extract_tb, format_list, format_exception_only\r\n\r\n show = join(dirname(abspath(__file__)), '')\r\n\r\n def _check_file(name):\r\n return name and name.startswith(show)\r\n\r\n def _print(typ, value, tb): # If not debug, generator expression: filter trace to my files.\r\n show = extract_tb(tb) if DEBUG else (fs for fs in extract_tb(tb, limit=3) if _check_file(fs.filename))\r\n fmt = format_list(show) + format_exception_only(typ, value)\r\n return ''.join((f.strip('\"\\'').replace('\\\\n', '') for f in fmt))\r\n\r\n args = ex or exc_info()\r\n return _print(*args)",
"def GetBtlogBacktrace(self, depth, zstack_record):\n\n\t\tout_str = ''\n\t\tframe = 0\n\t\tif not zstack_record:\n\t\t\treturn \"Zstack record none!\"\n\t\t\n\t\tzstack_record_bt = zstack_record.GetChildMemberWithName('bt')\n\t\tpc_array = read_mem(zstack_record_bt.load_addr, depth * self.pointer_size)\n\n\t\twhile frame < depth:\n\t\t\tframe_pc = unpack('<Q', pc_array[frame*8 : (frame + 1) * 8])[0]\n\t\t\tif not frame_pc:\n\t\t\t\tbreak\n\t\t\t\n\t\t\tsb_addr = self.target.ResolveLoadAddress(frame_pc)\n\t\t\tif sb_addr:\n\t\t\t\tsymbol_str = str(sb_addr)\n\t\t\telse:\n\t\t\t\tsymbol_str = ''\n\t\t\tout_str += \"{0: <#0x} <{1: <s}>\\n\".format(frame_pc, symbol_str)\n\t\t\tframe += 1\n\n\t\treturn out_str"
]
| [
"0.7825131",
"0.768806",
"0.7321852",
"0.7201898",
"0.7133164",
"0.7126731",
"0.71193105",
"0.7107479",
"0.70943695",
"0.7013598",
"0.7007724",
"0.699771",
"0.6987858",
"0.6929358",
"0.68698084",
"0.67966884",
"0.6782249",
"0.6760829",
"0.67419785",
"0.6733596",
"0.67156154",
"0.67023444",
"0.6645795",
"0.6637326",
"0.66136605",
"0.65875334",
"0.65124863",
"0.64817536",
"0.6412701",
"0.6411645"
]
| 0.77414405 | 1 |
Return a string useable for output to stdout or a log file that provides a representation of the "exception stack" and the "frame stack" from "bottom to top" (BTT). The "exception stack" captures the code tree from main to where the exception was raised and is usually the most interesting part of the stack. The "frame stack" captures the code from the point to where the exception was caught. Displaying the stack from bottom to top in an output log or stdout is the style in which Jython/Python displays the stack by default. There is another method named _exceptionStackTTB() that can be used to create a string that represents the execution stack from top to bottom, which is the style that Java uses. | def _exceptionStackBTT(self,methodName,exc,depth=10):
stack = ""
# Reconstruct the call stack from where the trace of the exception was initiated by invoking
# Trace.error() or Trace.severe().
stackList = traceback.extract_stack()
try:
stack = "\tFrame stack (most recent call last):\n"
for stackData in stackList:
sourcefile,line,function,text = stackData
if (sourcefile.endswith("Trace.py") and (function == "error" or function == "severe")): break
sepIndex = sourcefile.rfind(os.sep)
if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):
sourcefile = sourcefile[sepIndex+1:]
#endIf
if (text == None):
stack = "%s\t%s(%s) [%s]\n" % (stack,sourcefile,line,function)
else:
stack = "%s\t%s(%s) [%s] - %s\n" % (stack,sourcefile,line,function,text)
#endIf
#endFor
except:
# This shouldn't happen, but in case it does...
exc_type,exc_value = sys.exc_info()[:2]
stack = "%s\n\tException getting frame stack. Type: %s, Value: %s" % (stack,exc_type,exc_value)
#endTry
try:
stack = "%s\tException stack (most recent call last):\n" % stack
tb = sys.exc_info()[2]
stackList = traceback.extract_tb(tb,depth)
for stackData in stackList:
sourcefile,line,function,text = stackData
sepIndex = sourcefile.rfind(os.sep)
if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):
sourcefile = sourcefile[sepIndex+1:]
#endIf
if (text == None):
stack = "%s\t%s(%s) [%s]\n" % (stack,sourcefile,line,function)
else:
stack = "%s\t%s(%s) [%s] - %s\n" % (stack,sourcefile,line,function,text)
#endIf
#endFor
except:
# This shouldn't happen, but in case it does...
exc_type,exc_value = sys.exc_info()[:2]
stack = "%s\tException getting exception stack. Type: %s, Value: %s\n" % (stack,exc_type,exc_value)
#endTry
# At the very end - put the exception string
stack = "%s\t%s" % (stack,exc)
return stack | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack",
"def get_traceback_stxt():\n #/\n exc_cls, exc_obj, tb_obj = sys.exc_info()\n\n #/\n txt_s = traceback.format_exception(exc_cls, exc_obj, tb_obj)\n\n #/\n res = ''.join(txt_s)\n\n return res",
"def get_exception():\n trace = ''\n exception = ''\n exc_list = traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)",
"def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def dump_stacks(self):\n\n dump = []\n\n # threads\n threads = dict([(th.ident, th.name) for th in threading.enumerate()])\n\n for thread, frame in sys._current_frames().items():\n if thread not in threads:\n continue\n dump.append(\"Thread 0x%x (%s)\\n\" % (thread, threads[thread]))\n dump.append(\"\".join(traceback.format_stack(frame)))\n dump.append(\"\\n\")\n\n return \"\".join(dump)",
"def formatException(cls, instance, trcback, context=1):\n\n\tstack = extractStack(getInnerMostFrame(trcback), context=context)\n\toutput = []\n\toutput.append(\"Traceback (most recent call last):\")\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\toutput.append(\" File \\\"{0}\\\", line {1}, in {2}\".format(fileName, lineNumber, name))\n\t\tfor line in context:\n\t\t\toutput.append(\" {0}\".format(line.strip()))\n\tfor line in traceback.format_exception_only(cls, instance):\n\t\toutput.append(\"{0}\".format(line))\n\treturn output",
"def exception_to_string(excp: Exception) -> str:\n stack = traceback.extract_stack()[:-3] + traceback.extract_tb(\n excp.__traceback__\n ) # add limit=??\n pretty = traceback.format_list(stack)\n return \"\".join(pretty) + f\"\\n {excp.__class__} {excp}\"",
"def getStackString(self):\n return \"\".join(self.stack[:0:-1])",
"def fancy_traceback(exc: Exception) -> str:\n text = \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n return f\"```py\\n{text[-4086:]}\\n```\"",
"def traceback(self):\r\n clean = self.raw_traceback\r\n lines = ['Traceback (most recent call last):\\n']\r\n lines += traceback.format_list(clean)\r\n msg = str(self.error)\r\n lines += traceback.format_exception_only(self.exc_info[0], msg)\r\n return ''.join(lines)[:-1]",
"def exception_stacktrace(self):\n # type: () -> list[string_types]\n return self._exception_stacktrace",
"def format_stack_trace(exc_info):\n if exc_info[0] is None:\n return ''\n lines = traceback.format_exception(*exc_info)\n return ''.join(line for line in lines)",
"def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def DumpStackTracebacks():\n results = []\n id_name_map = {}\n for thread in threading.enumerate():\n id_name_map[thread.ident] = thread.name\n\n results.append(\n '*****\\n'\n '*\\n'\n '* Dumping debug information.\\n'\n '*\\n'\n '*****\\n')\n # pylint: disable=protected-access\n for thread_id, stack in sys._current_frames().items():\n results.append('Thread %s (id=%d):\\n' %\n (id_name_map.get(thread_id, 'unnamed-%d' % thread_id),\n thread_id))\n for filename, line_no, function_name, text in (\n traceback.extract_stack(stack)):\n # Same format as the usual Python stack trace, but indented\n # twice\n results.append(' File: \"%s\", line %d, in %s\\n' % (\n filename, line_no, function_name))\n if text:\n results.append(' %s\\n' % text.strip())\n\n results.append('***** End of debug information.\\n')\n\n return ''.join(results)",
"def last_exception():\n exc_type, exc_value, exc_traceback = sys.exc_info()\n return ''.join(traceback.format_exception(exc_type, exc_value,\n exc_traceback))",
"def LastStackTrace():\n # Temporarily redirect traceback from STDOUT into a buffer.\n trace_buf = Buffer()\n old_stdout = sys.stdout\n sys.stdout = trace_buf\n\n try:\n traceback.print_exc(file=sys.stdout)\n except AttributeError:\n # No exception for traceback exist.\n print ''\n\n # Restore STDOUT.\n sys.stdout = old_stdout\n\n return trace_buf.GetBufferAsStr().strip()",
"def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)",
"def dump_stacktraces():\n lines = []\n for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212\n lines.append(\"\\n######### ProcessID=%s, ThreadID=%s #########\" % (\n os.getpid(), thread_id\n ))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n lines.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n lines.append(\" %s\" % (line.strip()))\n lines.append(\"#############################################\\n\\n\")\n\n print('\\n'.join(lines), file=sys.stderr if _MANHOLE.redirect_stderr else sys.stdout)",
"def FormatExceptionOnly():\n return '\\n'.join(\n traceback.format_exception_only(*sys.exc_info()[:2])).strip()",
"def tb_log_str(exception) -> str:\n return \"\".join(traceback.format_exception(None, exception, exception.__traceback__))",
"def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')",
"def formatException(self, exc_info):\n traces = traceback.format_exception(*exc_info)\n return \"\\n\".join(traces)",
"def print_exc_plus(tb):\n while 1:\n if not tb.tb_next:\n break\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print(\"Locals by frame, innermost last\")\n for frame in stack:\n print()\n print(\"Frame %s in %s at line %s\" % (frame.f_code.co_name,\n frame.f_code.co_filename,\n frame.f_lineno))\n for key, value in frame.f_locals.items():\n print(\"\\t%20s = \" % key,)\n #We have to be careful not to cause a new error in our error\n #printer! Calling str() on an unknown object could cause an\n #error we don't want.\n try:\n print(value)\n except:\n print(\"<ERROR WHILE PRINTING VALUE>\")",
"def _exc_info_to_string(self, err, test):\r\n exctype, value, tb = err\r\n # Skip test runner traceback levels\r\n while tb and self._is_relevant_tb_level(tb):\r\n tb = tb.tb_next\r\n if exctype is test.failureException:\r\n # Skip assert*() traceback levels\r\n length = self._count_relevant_tb_levels(tb)\r\n msgLines = traceback.format_exception(exctype, value, tb, length)\r\n else:\r\n msgLines = traceback.format_exception(exctype, value, tb)\r\n \r\n if self.buffer:\r\n output = sys.stdout.getvalue()\r\n error = sys.stderr.getvalue() \r\n if output:\r\n if not output.endswith('\\n'):\r\n output += '\\n'\r\n msgLines.append(STDOUT_LINE % output)\r\n if error:\r\n if not error.endswith('\\n'):\r\n error += '\\n'\r\n msgLines.append(STDERR_LINE % error)\r\n return ''.join(msgLines)",
"def print_exc_plus():\n tb = sys.exc_info()[2]\n while tb.tb_next:\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print \"Locals by frame, innermost last\"\n for frame in stack:\n print\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)\n for key, value in frame.f_locals.items():\n print \"\\t%20s = \" % key,\n try: print value\n except: print \"<ERROR WHILE PRINT VALUE>\"",
"def _exc_info_to_string(self, err, test):\n\t\texctype, value, tb = err\n\t\t# Skip test runner traceback levels\n\t\twhile tb and self._is_relevant_tb_level(tb):\n\t\t\ttb = tb.tb_next\n\n\t\tif exctype is test.failureException:\n\t\t\t# Skip assert*() traceback levels\n\t\t\tlength = self._count_relevant_tb_levels(tb)\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb, length)\n\t\telse:\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb)\t\t\n\t\treturn ''.join(msgLines)",
"def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)",
"def tidy_error(ex=None) -> str:\r\n from sys import exc_info\r\n from os.path import join, abspath, dirname\r\n from traceback import extract_tb, format_list, format_exception_only\r\n\r\n show = join(dirname(abspath(__file__)), '')\r\n\r\n def _check_file(name):\r\n return name and name.startswith(show)\r\n\r\n def _print(typ, value, tb): # If not debug, generator expression: filter trace to my files.\r\n show = extract_tb(tb) if DEBUG else (fs for fs in extract_tb(tb, limit=3) if _check_file(fs.filename))\r\n fmt = format_list(show) + format_exception_only(typ, value)\r\n return ''.join((f.strip('\"\\'').replace('\\\\n', '') for f in fmt))\r\n\r\n args = ex or exc_info()\r\n return _print(*args)",
"def log_stack(self, msg):\n\n log(('%s\\n' % msg) + ''.join(traceback.format_list(traceback.extract_stack())))",
"def format_exc(etype, evalue, etb, context=5, tb_offset=0):\r\n # some locals\r\n try:\r\n etype = etype.__name__\r\n except AttributeError:\r\n pass\r\n\r\n # Header with the exception type, python version, and date\r\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\r\n date = time.ctime(time.time())\r\n pid = 'PID: %i' % os.getpid()\r\n\r\n head = '%s%s%s\\n%s%s%s' % (etype, ' ' * (75 - len(str(etype)) - len(date)),\r\n date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),\r\n pyver)\r\n\r\n # Flush cache before calling inspect. This helps alleviate some of the\r\n # problems with python 2.3's inspect.py.\r\n linecache.checkcache()\r\n # Drop topmost frames if requested\r\n try:\r\n records = _fixed_getframes(etb, context, tb_offset)\r\n except:\r\n raise\r\n print('\\nUnfortunately, your original traceback can not be '\r\n 'constructed.\\n')\r\n return ''\r\n\r\n # Get (safely) a string form of the exception info\r\n try:\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n except:\r\n # User exception is improperly defined.\r\n etype, evalue = str, sys.exc_info()[:2]\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n # ... and format it\r\n exception = ['%s: %s' % (etype_str, evalue_str)]\r\n frames = format_records(records)\r\n return '%s\\n%s\\n%s' % (head, '\\n'.join(frames), ''.join(exception[0]))"
]
| [
"0.770486",
"0.76810896",
"0.735255",
"0.7192024",
"0.71451706",
"0.71258485",
"0.7122424",
"0.70791197",
"0.7071235",
"0.70586354",
"0.70271665",
"0.70149356",
"0.69869155",
"0.69722915",
"0.69187415",
"0.6851646",
"0.67909634",
"0.67870295",
"0.6779541",
"0.677022",
"0.6752659",
"0.6738084",
"0.66894376",
"0.6627414",
"0.6567436",
"0.656183",
"0.65616554",
"0.648992",
"0.6478177",
"0.6335669"
]
| 0.77506846 | 0 |
Set the trace level for this instance of Trace to the given level. The given level may be a Jython string that is a valid trace level as determined by the _coerceLevel() method. Or the given level may be an integer constant that is one of the levels defined in the Level class. | def setTraceLevel (self,level):
if (type(level) == type("") or type(level) == type(u"")):
if (level):
level = self._coerceLevel(level)
self.traceLevel = level
#endIf
elif (type(level) == type(0)):
if (self._isTraceLevel(level)):
self.traceLevel = level
else:
# level is a number but not in the range of a trace level.
raise TraceLevelException("Invalid trace level: %s Valid trace levels are defined by the Level class." % level)
#endIf
else:
# Odd case where level is unexpected type
raise TraceLevelException("Trace level must be either a string or an integer. Use levels defined by the Level class.")
#endIf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_level(self, level: LogLevel):\n pass",
"def level(self, level: int):\n if level is None:\n raise ValueError(\"Invalid value for `level`, must not be `None`\")\n\n self._level = level",
"def set_level(self, level: str):\n self._logger.setLevel(getattr(logging, level))",
"def set_level(self, level):\n if self._level_fixed:\n raise NameError(\"set_level() can be called only once !\")\n\n try:\n Level(level)\n except ValueError:\n raise ValueError(\"LEVEL parameter must be a Level\")\n\n self._level = Level(level)\n self._level_fixed = True",
"def setLevel(self, level):\n self.level = level",
"def set_logging_level(self, level):\n return self.sdk.set_logging_level(\n level,\n prefix=self.__class__.__name__,\n )",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n\n self._level = level",
"def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"SEVERE\", \"FINE\", \"FINER\", \"FINEST\"]\n if level not in allowed_values:\n raise ValueError(\n \"Invalid value for `level` ({0}), must be one of {1}\"\n .format(level, allowed_values)\n )\n\n self._level = level",
"def setLevel(self, level):\n self.lvl = level",
"def set_logging_level(self, level):\n if str(level) == '1':\n self.logging_level = logging.DEBUG\n elif str(level) == '2':\n self.logging_level = logging.INFO\n elif str(level) == '3':\n self.logging_level = logging.WARNING\n elif str(level) == '4':\n self.logging_level = logging.ERROR\n elif str(level) == '5':\n self.logging_level = logging.CRITICAL",
"def setLevel(level='info'):\n\n mapper = {\n 'critical' : logging.CRITICAL, \n 'error' : logging.ERROR,\n 'warning' : logging.WARNING,\n 'info' : logging.INFO,\n 'debug' : logging.DEBUG,\n }\n if level not in mapper:\n raise ValueError('level must be one of these: {}'.format(list(mapper.keys())))\n else:\n logger.setLevel(mapper[level])",
"def set_logger_level(lgr, level):\n if isinstance(level, int):\n pass\n elif level.isnumeric():\n level = int(level)\n elif level.isalpha():\n level = getattr(logging, level)\n else:\n lgr.warning(\"Do not know how to treat loglevel %s\" % level)\n return\n lgr.setLevel(level)",
"def __change_level(self, level):\n self.level = level",
"def level(self, level=ERROR):\n try:\n self._level = level_dict[level]\n except KeyError:\n raise ValueError(f\"Input level is invalid.\")\n self.cnsl_handler.setLevel(level=self._level)\n self.file_handler.setLevel(level=self._level)\n self.logger.setLevel(level=self._level)",
"def set_log_level(self, level):\n if level == 'info':\n level = logging.INFO\n if level == 'debug':\n level = logging.DEBUG\n if level == 'error':\n level = logging.ERROR\n self._log.setLevel(level)",
"def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)",
"def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)",
"def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L",
"def level_logging(self, level):\r\n\r\n # converts the provided logging level value (either string or\r\n # integer value) into the appropriate normalized value that can\r\n # be used internally for logging level setting\r\n level = self._level(level)\r\n\r\n # sets the (new) level value value for both the base stream\r\n # handler and also for the logger itself\r\n self.handler_stream.setLevel(level)\r\n self.logger.setLevel(level)\r\n\r\n # iterates over the complete set of attached handlers to\r\n # update their respective logging level\r\n for handler in self.handlers: handler.setLevel(level)",
"async def set_log_level(self, log_level: str) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.set_log_level, log_level=log_level))",
"def level(self, L):\n assert isinstance(L, level)\n self.__level = L",
"def setLevel( self, lvl ):\n if isinstance( lvl, str ):\n return super().setLevel( lvl.upper() )\n else:\n return super().setLevel( lvl )",
"async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")",
"def setLevel(self, level):\n handlers = self.logger.handlers\n for handler in handlers:\n handler.setLevel(level)",
"def setLogLevel(level):\n None",
"def change_level(level):\n if 'debug' in level: LOG.setLevel(logging.DEBUG)\n elif 'info' in level: LOG.setLevel(logging.INFO)\n elif 'warning' in level: LOG.setLevel(logging.WARNING)\n elif 'error' in level: LOG.setLevel(logging.ERROR)\n elif 'critical' in level: LOG.setLevel(logging.CRITICAL)\n Logger.log('info', 'This logger changed the messages priority level to ', level)",
"def set_level(self, level_name):\n\n self.current_level = level_name",
"def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"ERROR\"]\n if level.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for level -> \" + level)\n self._level = \"outdated_sdk_version\"\n else:\n self._level = level"
]
| [
"0.7306564",
"0.72274274",
"0.7219667",
"0.71255",
"0.70831287",
"0.7012871",
"0.69622135",
"0.69622135",
"0.69622135",
"0.69605273",
"0.69581693",
"0.68610203",
"0.6837203",
"0.67565393",
"0.6656551",
"0.6635947",
"0.6564048",
"0.6537813",
"0.6396795",
"0.63748324",
"0.6345045",
"0.6333168",
"0.62392485",
"0.6230107",
"0.62225395",
"0.6213904",
"0.6211399",
"0.6175336",
"0.61486703",
"0.6130222"
]
| 0.7709587 | 0 |
The configureTrace() method defined for the Trace class is a convenience wrapper around the configureTrace() method defined for the Trace module. It is often the case that a Trace class instance is readily available to use for "global" trace configuration. | def configureTrace(self,traceString):
configureTrace(traceString) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trace(self, trace=...):\n ...",
"def configureTrace(traceString):\n \n setTraceSpec(traceString)\n registeredModules = Trace.tracedEntities.keys()\n for module in registeredModules:\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(module)):\n trace = Trace.tracedEntities[module]\n trace.setTraceLevel(spec.level)\n break\n #endIf\n #endFor\n #endFor",
"def _configureTraceAndLogging(self,traceArgs):\n logFile = self._getArg(['logFile','logfile'], traceArgs)\n if (logFile):\n TR.appendTraceLog(logFile)\n #endIf\n\n trace = self._getArg(['trace', 'loglevel'], traceArgs)\n\n if (trace):\n if (not logFile):\n TR.appendTraceLog('trace.log')\n #endDef\n\n TR.configureTrace(trace)\n #endIf\n return (trace,logFile)",
"def trace(config: Optional[Config] = None) -> ContextManager[None]:\n if config is None:\n config = get_default_config()\n return trace_calls(\n logger=config.trace_logger(),\n code_filter=config.code_filter(),\n sample_rate=config.sample_rate(),\n max_typed_dict_size=config.max_typed_dict_size(),\n )",
"def trace_logger(self: logging.Logger, msg: str, *args, **kwargs) -> None:\n if self.isEnabledFor(logging.TRACE):\n self._log(logging.TRACE, msg, args, **kwargs)",
"def traceXml(self, traceConfXml):\r\n if core.FW_conf['tracing_enabled']:\r\n core.FW_conf['trace'].traceActivation(traceConfXml, scriptActivation = True )",
"def configureThisTrace(self):\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(self.entityName)):\n self.traceLevel = spec.level\n break\n #endIf\n #endFor",
"def setTraceSpec(traceString):\n \n if (not traceString):\n raise Exception(\"The traceString argument must be a non-empty string.\")\n #endIf\n \n Trace.traceSpec = parseTraceString(traceString)\n Trace.traceString = traceString",
"def set_tracing(self, tracing: bool) -> None:\n self.tracing = tracing",
"def trace(self, *args, **kwargs): # real signature unknown\n pass",
"def trace(\n self, trace_id=None, _create_span=False, _span_args=None, **trace_args\n ):\n\n def _new_trace_decorator(func):\n @six.wraps(func)\n def __new_trace_decorator_inner(*args, **kwargs):\n if not callable(trace_id):\n trace_args.setdefault('trace_id', trace_id)\n\n with self._sdk.trace(**trace_args) as trace:\n if not _create_span:\n return func(*args, **kwargs)\n\n with trace.span(**_span_args):\n return func(*args, **kwargs)\n\n return __new_trace_decorator_inner\n\n if callable(trace_id):\n return _new_trace_decorator(trace_id)\n\n return _new_trace_decorator",
"def log(self, trace: CallTrace) -> None:\n pass",
"def replace_trace(trace=None):\n oldtrace = sys.gettrace()\n sys.settrace(trace)\n try:\n yield\n finally:\n # specific hack to work around a bug in pycoverage, see\n # https://bitbucket.org/ned/coveragepy/issue/123\n if (oldtrace is not None and not callable(oldtrace) and\n hasattr(oldtrace, 'pytrace')):\n oldtrace = oldtrace.pytrace\n sys.settrace(oldtrace)",
"def enableTrace(tracable):\r\n global traceEnabled\r\n traceEnabled = tracable\r\n if tracable:\r\n if not logger.handlers:\r\n logger.addHandler(logging.StreamHandler())\r\n logger.setLevel(logging.DEBUG)",
"def configure(cls):\n pass",
"def settrace(function): # real signature unknown; restored from __doc__\n pass",
"def configure(self, *args, **kwargs):\n raise NotImplementedError()",
"def configure(self, *args, **kwargs):\n raise NotImplementedError()",
"def configure(self, *args, **kwargs):\n raise NotImplementedError()",
"def patch_traces(self, traces, project_id=None, options=None):\n if project_id is None:\n project_id = self.project\n\n self.trace_api.patch_traces(\n project_id=project_id,\n traces=traces,\n options=options)",
"def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n self.reset()\n while frame:\n frame.f_trace = self.trace_dispatch\n self.botframe = frame\n frame = frame.f_back\n self.set_step()\n sys.settrace(self.trace_dispatch)",
"def apply(self, trace: Trace, _) -> Optional[Trace]:\n raise NotImplementedError",
"def getTraceSpec():\n return Trace.traceSpec",
"def settrace_patch(tracefunc: Any) -> None:\n global _is_debugger_active\n _is_debugger_active = bool(tracefunc)\n try:\n _original_settrace(tracefunc)\n except Exception:\n # IDEs, such as PyCharm, may ban calls to settrace().\n # http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html\n # In such cases, do nothing.\n pass",
"def run_model_pipeline_for_trace(self, trace, tuning=True):\n pass",
"def patch_trace(self, trace):\n if self.auto:\n # Dispatch immediately:\n self.logger.debug('Immediate dispatch')\n # Also dispatch any cached traces:\n self._traces.append(trace)\n self._dispatch(self._traces)\n else:\n if trace in self._traces:\n # Trace already cached!\n return\n # Dispatch when called:\n self.logger.debug('Delayed dispatch')\n self._traces.append(trace)",
"def log_trace(self, msg):\n self.log(msg, level=LOG_TRACE)",
"def configure(self):\n pass",
"def configure(self):\n pass",
"def configure(self):\r\n pass"
]
| [
"0.6717687",
"0.6619051",
"0.62301195",
"0.61260366",
"0.605834",
"0.59307045",
"0.5855935",
"0.583541",
"0.5781825",
"0.57749945",
"0.56175256",
"0.5521022",
"0.551721",
"0.550927",
"0.5466992",
"0.54495686",
"0.5391383",
"0.5391383",
"0.5391383",
"0.534257",
"0.53140676",
"0.5292872",
"0.5292706",
"0.52157223",
"0.5211381",
"0.520904",
"0.51943284",
"0.5183837",
"0.5183837",
"0.51834965"
]
| 0.78586704 | 0 |
Return a list of TraceSpecification instances that represent a parsing of the given trace string. The returned list holds a TraceSpecifification instance for each trace specification in the given trace string. | def parseTraceString(traceString):
result = []
# If the given traceString is enclosed in double-quotes,
# then strip the double-quotes.
if (traceString[0] == '"' and traceString[-1] == '"'):
traceString = traceString[1:-1]
#endIf
traceStrings = traceString.split(":")
for trace in traceStrings:
traceParts = trace.split("=")
if (len(traceParts) != 2):
raise TraceSpecificationException("Encountered an invalid trace string: %s A trace string looks like <module_pattern>=<level>." % trace)
#endIf
modulePattern = traceParts[0]
level = traceParts[1]
result.append(TraceSpecification(modulePattern,level))
#endFor
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configureTrace(traceString):\n \n setTraceSpec(traceString)\n registeredModules = Trace.tracedEntities.keys()\n for module in registeredModules:\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(module)):\n trace = Trace.tracedEntities[module]\n trace.setTraceLevel(spec.level)\n break\n #endIf\n #endFor\n #endFor",
"def setTraceSpec(traceString):\n \n if (not traceString):\n raise Exception(\"The traceString argument must be a non-empty string.\")\n #endIf\n \n Trace.traceSpec = parseTraceString(traceString)\n Trace.traceString = traceString",
"def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans",
"def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans",
"def parse_list(specs):\n if not specs:\n return []\n if isinstance(specs, six.string_types):\n specs = specs.split(',')\n return [TagPattern.parse(spec) for spec in specs]",
"def _parse_traceback(self, trace):\n p_traceback = [ \"%s:%d:in `%s'\" % (filename, lineno, funcname) \n for filename, lineno, funcname, _\n in traceback.extract_tb(trace) ]\n p_traceback.reverse()\n\n return p_traceback",
"def query_trace_list(self) -> List[Trace]:\n traces = self.CONFch.TRACe.CATalog.q()\n return [\n self.get_trace(tr_name) for _tr_no, tr_name in traces.comma_list_pairs()\n ]",
"def parse(self):\r\n self._properties = {}\r\n self._names = []\r\n\r\n self._results = []\r\n for x in self._specs:\r\n result = self.parse_statement(x)\r\n if result:\r\n self._results.append(result)\r\n self.validate(self._results)\r\n return self._results",
"def parse_filters(filters_str):\n fltrs = []\n for part in str(filters_str).lower().split(\",\"):\n if part==\"blur\":\n fltrs.append(filters.blur(1))\n elif part==\"distort\":\n fltrs.append(filters.distort(18))\n\n return fltrs",
"def Parse(self, stacktrace_string, deps, signature=None, top_n_frames=None):\n # Filters to filter callstack buffers.\n filters = [FilterFramesBeforeAndInBetweenSignatureParts(signature),\n FilterInlineFunction(),\n KeepTopNFrames(top_n_frames or DEFAULT_TOP_N_FRAMES)]\n stacktrace_buffer = StacktraceBuffer(filters=filters)\n\n stack_detector = callstack_detectors.ChromeCrashStackDetector()\n # Initial background callstack which is not to be added into Stacktrace.\n stack_buffer = CallStackBuffer()\n for line in stacktrace_string.splitlines():\n start_of_callstack = stack_detector(line)\n\n if start_of_callstack:\n stacktrace_buffer.AddFilteredStack(stack_buffer)\n stack_buffer = CallStackBuffer.FromStartOfCallStack(start_of_callstack)\n else:\n frame = StackFrame.Parse(stack_buffer.language_type,\n stack_buffer.format_type, line, deps,\n len(stack_buffer.frames))\n if frame is not None:\n stack_buffer.frames.append(frame)\n\n # Add the last stack to stacktrace.\n stacktrace_buffer.AddFilteredStack(stack_buffer)\n return stacktrace_buffer.ToStacktrace()",
"def parse_trace(self):\n current = '' # current basic block being parsed\n previous = '0' # previous basic block beding parsed\n edge_count = 0\n uniq_count = 0\n with open(self._fifo_name, 'r') as fifo:\n for line in fifo:\n if line[6] == '4':\n continue\n # process traceed tbs\n current = line.split(':')[0]\n\n parse_edge = (previous, current)\n edge_count += 1\n if not parse_edge in TRACE_EDGES:\n TRACE_EDGES.add(parse_edge)\n uniq_count += 1\n previous = current",
"def Parse(self, stacktrace_list, deps, signature=None, top_n_frames=None):\n callstacks = []\n for stacktrace_str in stacktrace_list:\n sub_stacktrace = self._sub_parser.Parse(stacktrace_str, deps,\n signature=signature,\n top_n_frames=top_n_frames)\n if sub_stacktrace:\n callstacks.extend(sub_stacktrace.stacks)\n\n return Stacktrace(callstacks, callstacks[0]) if callstacks else None",
"def parse_stack_trace(self, it, line):\n events = []\n stack_traces = []\n\n while self.stack_trace_re.match(line):\n event = self.parse_stack_trace_line(line)\n if event:\n events.append(event)\n\n stack_traces.append(line)\n line = get_next(it)\n\n events.reverse()\n\n return stack_traces, events, line",
"def get_expression(self, s):\n s = s.split('#')[0]\n expressions = []\n for m in self.re_expression.finditer(s):\n expressions.append(Expression(m.group(0), m.start(), m.end()))\n return expressions",
"def parse(s):\n # Use _PARSE_RE to check that it's valid.\n if not CFGProduction._PARSE_RE.match(s):\n raise ValueError, 'Bad production string'\n # Use _SPLIT_RE to process it.\n pieces = CFGProduction._SPLIT_RE.split(s)\n pieces = [p for i,p in enumerate(pieces) if i%2==1]\n lhside = Nonterminal(pieces[0])\n rhsides = [[]]\n for piece in pieces[2:]:\n if piece == '|':\n rhsides.append([]) # Vertical bar\n elif piece[0] in ('\"', \"'\"):\n rhsides[-1].append(piece[1:-1]) # Terminal\n else:\n rhsides[-1].append(Nonterminal(piece)) # Nonterminal\n return [CFGProduction(lhside, rhside) for rhside in rhsides]",
"def parse_tracelogging(bv: binaryninja.binaryview.BinaryView, stream: Stream) -> List[Provider]:\n magic = stream.read(4)\n if magic != b\"ETW0\":\n raise ETWBreakerUnexpectedToken(b\"ETW0\", magic)\n\n stream.read(12)\n providers = []\n while True:\n prov_type = stream.read_u8()\n if prov_type == 6:\n providers.append(parse_tracelogging_event(bv, stream))\n elif prov_type == 4:\n providers.append(parse_tracelogging_provider(bv, stream))\n elif prov_type == 0:\n # padding\n continue\n else:\n print(\"Unknown Trace logging prov_type {0:d}, expect to be the end of trace logging block\".format(prov_type))\n break\n\n return providers",
"def parse_str( s: str ) -> list:\n\n tree = ET.fromstring( s )\n if tree is None: return None\n return parse_tree( tree )",
"def project_trace(self, trace, elements):\n res = filter(\n lambda ac: self.activity_concept_name(ac) in elements, trace)\n\n return list(map(lambda ac: self.activity_concept_name(ac), res))",
"def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec",
"def getTraceSpec():\n return Trace.traceSpec",
"def _segment(self, string: str) -> Generator:\n buff: List = []\n segment_start = 1\n type_: Optional[Types] = None\n for i, line in enumerate(string.split(\"\\n\"), start=1):\n line_type = self._parse_segment_type(line)\n if line_type is not None:\n if type_ is not None:\n yield type_, buff\n segment_start = i + 1\n buff = []\n type_ = line_type\n buff.append((line + \"\\n\", i))\n if buff:\n if type_ is None:\n raise ValueError(\n f\"Most likely missing Var name at \" f\"line {segment_start}\"\n )\n yield type_, buff",
"def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs",
"def parseConfStr(confStr):\n pairList = []\n specs = confStr.split(';')\n for spec in specs:\n if not spec:\n continue\n spec = spec.strip()\n splits = spec.split(',')\n splits = [ss.strip(\"()\") for ss in splits]\n splits = tuple(splits)\n pairList.append(splits)\n return pairList",
"def parse_etags(etag_str):\n if etag_str.strip() == \"*\":\n return [\"*\"]\n else:\n # Parse each ETag individually, and return any that are valid.\n etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(\",\"))\n return [match[1] for match in etag_matches if match]",
"def batch_parse(inputs, grammar, trace=0):\n\n # put imports here to avoid circult dependencies\n from nltk.grammar import FeatureGrammar\n from nltk.parse import FeatureChartParser, load_parser\n\n if isinstance(grammar, FeatureGrammar):\n cp = FeatureChartParser(grammar)\n else:\n cp = load_parser(grammar, trace=trace)\n parses = []\n for sent in inputs:\n tokens = sent.split() # use a tokenizer?\n syntrees = cp.nbest_parse(tokens)\n parses.append(syntrees)\n return parses",
"def parse(parser, string, ignore_white=True, trace=False):\n input_reader = InputReader(string, ignore_white)\n input_reader.trace = trace\n tokens = []\n try:\n tokens = parser.match(input_reader)\n parseResult = ParseResult(input_reader, tokens)\n except ParseException as e:\n parseResult = ParseResult(input_reader, tokens)\n parseResult.error = e\n parseResult.line = input_reader.line\n parseResult.linePos = input_reader.linePos\n return parseResult",
"def GetParserFilterListsFromString(cls, parser_filter_string):\n if not parser_filter_string:\n return [], []\n\n # Build the plugin to parser map, which cannot be a class member\n # otherwise the map will become invalid if a parser with plugins\n # is deregistered.\n plugin_to_parser_map = {}\n for parser_name, parser_class in cls._parser_classes.iteritems():\n if parser_class.SupportsPlugins():\n for plugin_name in parser_class.GetPluginNames():\n plugin_to_parser_map[plugin_name] = parser_name\n\n includes = set()\n excludes = set()\n\n preset_categories = presets.categories.keys()\n\n for filter_string in parser_filter_string.split(u','):\n filter_string = filter_string.strip()\n if not filter_string:\n continue\n\n if filter_string.startswith(u'-'):\n active_list = excludes\n filter_string = filter_string[1:]\n else:\n active_list = includes\n\n filter_string = filter_string.lower()\n if filter_string in cls._parser_classes:\n active_list.add(filter_string)\n\n elif filter_string in preset_categories:\n for entry in presets.GetParsersFromCategory(filter_string):\n active_list.add(plugin_to_parser_map.get(entry, entry))\n\n else:\n active_list.add(plugin_to_parser_map.get(\n filter_string, filter_string))\n\n return list(includes), list(excludes)",
"def parseTiming(timingString, outFilename):\n\n \n ls = timingString.split('\\n')\n \n timingobject = timing(outFilename)\n \n\n while len(ls) > 0:\n l = ls.pop(0)\n seg = l.split(':')\n if len(seg) > 3:\n seg = [seg[0], seg[1]+ ':' + seg[2], seg[3]]\n\n cl = seg[0].strip()\n na = seg[1].strip()\n de = seg[2].strip()\n\n if cl == 'C':\n timingobject.add_clock(na, de)\n elif cl == 'S':\n timingobject.add_signal(na, de)\n elif cl == 'B':\n # check if we have a class:\n if len(ls) > 0 and ls[0][:2] == \"BC\" :\n \n l = ls.pop(0)\n seg = l.split(':')\n bde = seg[2].strip()\n else:\n # there isn't a class; deal\n bde = \"\"\n \n timingobject.add_bus(na, de, bde)\n timingobject.timinggrid()\n timingobject.set_size()\n timingobject.save()",
"def parseComposite(s):\n\n answerlist = []\n try:\n RespDict = json.loads(s)\n except:\n return None\n for records in RespDict[\"Response\"]:\n if('Type' in records):\n for record in records[\"Response\"]:\n if(record is None):\n continue\n if(records['Type'] == 'T'):\n if (record[\"Selected\"] is True):\n if (record['val'].find(\"<math\") != -1):\n value = MathMLExtraction(record[\"val\"])\n else:\n value=record['val']\n answerlist.append(\"{}-{}\".format(records[\"PartId\"], value))\n elif(records['Type'] == 'MATCHMS'):#MatchMS\n value=\"{}-{}\".format(record['source'], record['target'])\n answerlist.append(\"{}-{}\".format(records[\"PartId\"], value))\n elif(records['Type']=='MCSS' or records['Type'] == 'MCMS' or records['Type'] == 'MAPMS' or\n records['Type'] == 'MAPSS' or records['Type'] == 'InlineChoices'):\n #MAPMS and MAPSS look the same as MCMS and MCSS\n if (record[\"Selected\"] is True):\n if (record[\"val\"] == \"\"):\n value='X'\n else:\n value=record[\"val\"]\n answerlist.append(\"{}-{}\".format(records[\"PartId\"], value))\n else:\n warnings.warn(\"Type is missing for Part ID\", records['PartId'])\n continue\n return answerlist",
"def parse(input):\n\n def read_ticket(spec):\n return [int(v) for v in spec.split(\",\")]\n\n class State(Enum):\n RULES = 1\n TICKET = 2\n SCANNED = 3\n\n rules = []\n ticket = None\n scanned = []\n state = State.RULES\n\n for r in [l.strip() for l in input.splitlines() if l.strip()]:\n if r == \"your ticket:\":\n state = State.TICKET\n elif r == \"nearby tickets:\":\n state = State.SCANNED\n elif state == State.RULES:\n rules.append(TicketRule(r))\n elif state == State.TICKET:\n ticket = read_ticket(r)\n elif state == State.SCANNED:\n scanned.append(read_ticket(r))\n else:\n assert False\n\n return (rules, ticket, scanned)"
]
| [
"0.57680905",
"0.5595512",
"0.5319835",
"0.5319835",
"0.50716096",
"0.5068398",
"0.5048298",
"0.50376815",
"0.50376624",
"0.49978292",
"0.49924955",
"0.49803588",
"0.4978812",
"0.49030364",
"0.4879855",
"0.4866006",
"0.4865482",
"0.48360372",
"0.4830144",
"0.48097444",
"0.48050496",
"0.47523758",
"0.47297138",
"0.47030562",
"0.46987364",
"0.46329254",
"0.46202788",
"0.46019632",
"0.459653",
"0.45656955"
]
| 0.77411985 | 0 |
Given a trace specification string, set the module traceSpec used by all instances of Trace. | def setTraceSpec(traceString):
if (not traceString):
raise Exception("The traceString argument must be a non-empty string.")
#endIf
Trace.traceSpec = parseTraceString(traceString)
Trace.traceString = traceString | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configureTrace(traceString):\n \n setTraceSpec(traceString)\n registeredModules = Trace.tracedEntities.keys()\n for module in registeredModules:\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(module)):\n trace = Trace.tracedEntities[module]\n trace.setTraceLevel(spec.level)\n break\n #endIf\n #endFor\n #endFor",
"def configureTrace(self,traceString):\n configureTrace(traceString)",
"def parseTraceString(traceString):\n result = []\n # If the given traceString is enclosed in double-quotes,\n # then strip the double-quotes.\n if (traceString[0] == '\"' and traceString[-1] == '\"'):\n traceString = traceString[1:-1]\n #endIf\n traceStrings = traceString.split(\":\")\n for trace in traceStrings:\n traceParts = trace.split(\"=\")\n if (len(traceParts) != 2):\n raise TraceSpecificationException(\"Encountered an invalid trace string: %s A trace string looks like <module_pattern>=<level>.\" % trace)\n #endIf\n \n modulePattern = traceParts[0]\n level = traceParts[1]\n result.append(TraceSpecification(modulePattern,level))\n #endFor\n return result",
"def from_spec(cls, string_spec):\n old_ns, new_ns = string_spec.split('=')\n return cls(old_ns, new_ns)",
"def configureThisTrace(self):\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(self.entityName)):\n self.traceLevel = spec.level\n break\n #endIf\n #endFor",
"def setModule(name, module):",
"def trace_id_set(trace_id: tuple[str, str]) -> None:\n trace_id_cv.set(trace_id)",
"def getTraceSpec():\n return Trace.traceSpec",
"def _add_spec(self, requirement_name, spec_str):\n spec_str = spec_str or '>=0.0.0'\n spec_str = spec_str.replace(' ', '')\n spec_str = '~' + spec_str.replace('.x', '.0') if '.x' in spec_str else spec_str\n self.versions_spec[requirement_name].add(spec_str)",
"def change_driver(node, driver):\n module_node = node.find('./attstr')\n module_node.set(\"val\", driver)",
"def AddToSpecificationDict(SpecificationDict, SpecificationString):\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group(\"Specification\")\n Value = SpecificationMatch.group(\"Value\")\n SpecificationDict[Specification] = Value",
"def configure_specie(self, specie):\r\n pass",
"def trace_id(self, trace_id):\n\n self._trace_id = trace_id",
"def trace_id(self, trace_id):\n\n self._trace_id = trace_id",
"def change_track_name(node, driver):\n module_node = node.find(\"./attstr[@name='name']\")\n module_node.set(\"val\", driver)",
"def spec(self, spec):\n if spec is None:\n raise ValueError(\"Invalid value for `spec`, must not be `None`\") # noqa: E501\n\n self._spec = spec",
"def set_module(obj, mod):\n if not isinstance(mod, str):\n raise TypeError(\"The mod argument should be a string\")\n obj.__module__ = mod",
"def setmodule(self, module, priority='project'):\n\t\tself._assert_mutability()\n\t\tif isinstance(module, six.string_types):\n\t\t\tmodule = import_module(module)\n\t\tfor key in dir(module):\n\t\t\tif key.isupper():\n\t\t\t\tself.set(key, getattr(module, key), priority)",
"def setTimingProcessName(self, string: str) -> None:\n ...",
"def selectTrace(self,trace: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR\" + str(trace) + \":SEL\")\n\n return",
"def settrace_patch(tracefunc: Any) -> None:\n global _is_debugger_active\n _is_debugger_active = bool(tracefunc)\n try:\n _original_settrace(tracefunc)\n except Exception:\n # IDEs, such as PyCharm, may ban calls to settrace().\n # http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html\n # In such cases, do nothing.\n pass",
"def run_spec(self, run_spec):\n\n self._run_spec = run_spec",
"def set_tracing(self, tracing: bool) -> None:\n self.tracing = tracing",
"def override_paramset(self, override_str):\n\n paramset = ParamSet()\n if not override_str:\n return paramset\n\n override = eval(override_str, {}, {})\n if not override:\n return paramset\n\n for override_name in override:\n # The override can have a node_name/parm format which allows for point\n # instance overrides to override parms in a network.\n\n cached_override = self.override_cache.get(override_name, None)\n if cached_override is not None:\n # Hint to just skip\n if cached_override == -1:\n continue\n if isinstance(cached_override, PBRTParam):\n # textures which can't be overriden\n paramset.add(cached_override)\n continue\n pbrt_name, pbrt_type, tuple_names = cached_override\n if tuple_names:\n value = [override[x] for x in tuple_names]\n else:\n value = override[override_name]\n pbrt_param = PBRTParam(pbrt_type, pbrt_name, value)\n paramset.add(pbrt_param)\n continue\n\n override_match = self.override_pat.match(override_name)\n spectrum_type = override_match.group(\"spectrum\")\n parm_name = override_match.group(\"parm\")\n override_node = override_match.group(\"node\")\n if override_node is not None and override_node != self.name:\n self.override_cache[override_name] = -1\n continue\n\n # There can be two style of \"overrides\" one is a straight parm override\n # which is similar to what Houdini does. The other style of override is\n # for the spectrum type parms. Since spectrum parms can be of different\n # types and the Material Overrides only support \"rgb\" we are limited\n # in the types of spectrum overrides we can do. To work around this we'll\n # support a different style, override_parm:spectrum_type. If the parm name\n # ends in one of the \"rgb/color\" types then we'll handle it differently.\n # TODO add a comment as to what the value would look like\n\n # NOTE: The material SOP will use a parm style dictionary if there\n # parm name matches exactly\n # ie) if there is a color parm you will get\n # {'colorb':0.372511,'colorg':0.642467,'colorr':0.632117,}\n # But if the parm name doesn't match (which we are allowing\n # for you will get something like this -\n # {'colora':(0.632117,0.642467,0.372511),}\n\n # Once we have a parm name, we need to determine what \"style\" it is.\n # Whether its a hou.ParmTuple or hou.Parm style.\n tuple_names = tuple()\n parm_tuple = self.node.parmTuple(parm_name)\n if parm_tuple is None:\n # We couldn't find a tuple of that name, so let's try a parm\n parm = self.node.parm(parm_name)\n if parm is None:\n # Nope, not valid either, let's move along\n self.override_cache[override_name] = -1\n continue\n # if its a parm but not a parmtuple it must be a split.\n parm_tuple = parm.tuple()\n # we need to \"combine\" these and process them all at once and\n # then skip any other occurances. The skipping is handled by\n # the overall caching mechanism. 
self.override_cache\n tuple_names = tuple([x.name() for x in parm_tuple])\n\n # This is for wrangling parm names of texture nodes due to having a\n # signature parm.\n pbrt_parm_name = self.pbrt_parm_name(parm_tuple.name())\n\n if spectrum_type is None and tuple_names:\n # This is a \"traditional\" override, no spectrum or node name prefix\n value = [override[x] for x in tuple_names]\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, value\n )\n elif spectrum_type in (\"spectrum\", \"xyz\", \"blackbody\"):\n pbrt_param = PBRTParam(\n spectrum_type, pbrt_parm_name, override[override_name]\n )\n elif not tuple_names:\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, override[override_name]\n )\n else:\n raise ValueError(\"Unable to wrangle override name: %s\" % override_name)\n\n paramset.add(pbrt_param)\n\n # From here to the end of the loop is to allow for caching\n\n if pbrt_param.type == \"texture\":\n self.override_cache[override_name] = pbrt_param\n continue\n\n # we are making an assumption a split parm will never be a spectrum\n # or have a node prefix. The Material SOP doesn't allow for it as well.\n for name in tuple_names:\n # The -1 means \"continue\"\n self.override_cache[name] = -1\n # Sanity check\n if tuple_names and override_name not in tuple_names:\n raise ValueError(\n \"Override name: %s, not valid for a parmTuple\" % override_name\n )\n # override_name must match one of the tuple_names\n self.override_cache[override_name] = (\n pbrt_param.name,\n pbrt_param.param_type,\n tuple_names,\n )\n return paramset",
"def replace_trace(trace=None):\n oldtrace = sys.gettrace()\n sys.settrace(trace)\n try:\n yield\n finally:\n # specific hack to work around a bug in pycoverage, see\n # https://bitbucket.org/ned/coveragepy/issue/123\n if (oldtrace is not None and not callable(oldtrace) and\n hasattr(oldtrace, 'pytrace')):\n oldtrace = oldtrace.pytrace\n sys.settrace(oldtrace)",
"def define_measurement(self, trace: int, parameter: str) -> None:\n if trace not in range(1, self.ntraces + 1):\n self.ntraces = trace\n\n self.write(f\"CALC:PAR{trace}:DEF {parameter}\")",
"async def configure(self, ctx, *, specification):\n msg = error(\"Setup Stub.\")\n await ctx.send(msg)",
"def extend_spec(self, extend_spec):\n self._extend_spec = extend_spec",
"def settrace(function): # real signature unknown; restored from __doc__\n pass",
"def set_module(self, module):\n setattr(self, \"module\", module)"
]
| [
"0.7412666",
"0.6379489",
"0.59484065",
"0.556465",
"0.51999474",
"0.5198167",
"0.51660997",
"0.50286394",
"0.49554184",
"0.48970115",
"0.48137328",
"0.4759622",
"0.46943736",
"0.46943736",
"0.468349",
"0.46787977",
"0.46474877",
"0.4629121",
"0.4556305",
"0.45438346",
"0.45038274",
"0.4500232",
"0.44650495",
"0.4457894",
"0.4437393",
"0.4417406",
"0.44026178",
"0.43987966",
"0.43818724",
"0.43721643"
]
| 0.8000181 | 0 |
Return the module traceSpec used by all instances of Trace. | def getTraceSpec():
return Trace.traceSpec | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_testbench_specs(self, tb_type: str) -> Dict[str, Any]:\n return self._specs['testbenches'][tb_type]",
"def get_required_module_descriptors(self):\r\n return []",
"def trace(self):\n return self._trace",
"def tracing(self):\n return self.__trace",
"def return_spec(type_set, module):\n spec = importlib.util.spec_from_file_location(\n type_set,\n module\n )\n return spec",
"def get_module_info():\n\n return {RUNNER_NAME: ('mock runner', MockRunner)}",
"def spec(self):\n return self._spec",
"def spec(self):\n return self._spec",
"def get_trace(self):\n return self.trace",
"def getStandard(self):\n\n app = self.app\n loadData = app.loadData\n\n if not loadData or loadData == \"core\":\n return\n\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n\n for m in moduleSpecs or []:\n org = m[\"org\"]\n repo = m[\"repo\"]\n relative = m[\"relative\"]\n theCheckout = m.get(\"checkout\", checkout)\n theBackend = m.get(\"backend\", backend)\n bRep = backendRep(theBackend, \"spec\", default=backend)\n\n ref = f\"{bRep}{org}/{repo}{relative}\"\n if ref in seen:\n continue\n\n if not self.getModule(\n org,\n repo,\n relative,\n theCheckout,\n backend=theBackend,\n specs=m,\n ):\n self.good = False",
"def getTrace(self):\n trace = np.array([])\n for block in self.parent.data_block_list:\n \n trace = np.append(trace, block.getTrace(self.signal_type, self.native_channel_name))\n return trace",
"def configureTrace(traceString):\n \n setTraceSpec(traceString)\n registeredModules = Trace.tracedEntities.keys()\n for module in registeredModules:\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(module)):\n trace = Trace.tracedEntities[module]\n trace.setTraceLevel(spec.level)\n break\n #endIf\n #endFor\n #endFor",
"def _get_runs_dict(module):\n d = {}\n for name, obj in inspect.getmembers(module):\n if inspect.isfunction(obj) and name.endswith('benchmarks'):\n d[name] = obj\n return d",
"def module(self):\n return self.lib.module",
"def get_scenes_to_test(module_name: str):\n return inspect.getmembers(\n sys.modules[module_name],\n lambda m: inspect.isclass(m) and m.__module__ == module_name,\n )",
"def chipset_driver_modules(self):\n\t\treturn self.__info_dict['info']['chipset_driver_modules']['value']",
"def module_info():\n pass",
"def object_specs(self):\n if self._object_specs is None:\n self.object_specs = self.generate_object_specs()\n \n return self._object_specs",
"def name(cls):\n return MODULE_NAME",
"def get_test_modules_names() -> typing.List[str]:\n\n from services.meter.tests.unit import constants_for_tests\n return constants_for_tests.TESTS_MODULES",
"def perf_config(self):\n\n return self._perf_config",
"def traces(self):\n if self._traces is None:\n raise NotImplementedError(\"Weak implementation not supported\")\n else:\n return self._traces",
"def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs",
"def tracing_type(self) -> str:\n return pulumi.get(self, \"tracing_type\")",
"def specs(self):\r\n try: ref = self.ref.full_spec(print_as_line=True)\r\n except: ref = ''\r\n\r\n frf_r = (' frf_r=' + str(self.frf_r)) if hasattr(self, 'frf_r') else ''\r\n rf_r = (' rf_r=' + str(self.rf_r)) if hasattr(self, 'rf_r') else ''\r\n\r\n return self.series + ', ' + ref + rf_r + frf_r",
"def get_service_module(self):\n return self.__class__.__module__.split('.')[-2]",
"def get_module(self):\n module = self.__class__.__module__.split('.')\n module = \".\".join(module[:-1])\n module = module + \".\" + self._get_valid_version().module\n return module",
"def module(self):\n return self._module",
"def module(self):\n return self._module",
"def module(self):\n return self._module"
]
| [
"0.56054235",
"0.5599977",
"0.5386948",
"0.5383461",
"0.5374143",
"0.53723806",
"0.5286025",
"0.5286025",
"0.5230701",
"0.5191652",
"0.5066103",
"0.5061323",
"0.50166225",
"0.49577042",
"0.4929801",
"0.48848036",
"0.4884137",
"0.48701167",
"0.48573104",
"0.48374268",
"0.48327395",
"0.48301044",
"0.48242554",
"0.48171985",
"0.48083922",
"0.48014826",
"0.47988355",
"0.47945252",
"0.47945252",
"0.47945252"
]
| 0.7961328 | 0 |
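The rows above pair docstrings with implementations from the same Trace module (parseTraceString, setTraceSpec, getTraceSpec, configureTrace, setTraceLevel). As a rough end-to-end illustration of the trace-string syntax those rows describe, here is a minimal, self-contained sketch; the LEVELS mapping, the '*' wildcard translation, and every name other than parseTraceString and TraceSpecification are assumptions made for this example, not taken from the dataset.

import re

# Assumed numeric trace levels, lowest to highest verbosity (not from the original module).
LEVELS = {"none": 0, "severe": 1, "error": 2, "warning": 3,
          "info": 4, "fine": 5, "finer": 6, "finest": 7}

class TraceSpecification:
    """One <module_pattern>=<level> entry parsed from a trace string."""
    def __init__(self, module_pattern, level_name):
        level_name = level_name.lower()
        if level_name not in LEVELS:
            raise ValueError("Unknown trace level: %s" % level_name)
        self.modulePattern = module_pattern
        self.level = LEVELS[level_name]
        # Turn the simple '*' wildcard into an anchored regular expression,
        # mirroring the compiledRegex matching the snippets above rely on.
        self.compiledRegex = re.compile(
            "^" + re.escape(module_pattern).replace(r"\*", ".*") + "$")

def parseTraceString(trace_string):
    """Split a string such as 'myapp.*=fine:*=error' into specifications."""
    if trace_string.startswith('"') and trace_string.endswith('"'):
        trace_string = trace_string[1:-1]
    specs = []
    for part in trace_string.split(":"):
        pattern, sep, level = part.partition("=")
        if not sep:
            raise ValueError(
                "Invalid trace string %r; expected <module_pattern>=<level>" % part)
        specs.append(TraceSpecification(pattern, level))
    return specs

if __name__ == "__main__":
    # Prints the pattern and resolved level for each specification,
    # e.g. "myapp.* 5" and "* 2" under the assumed LEVELS mapping.
    for spec in parseTraceString("myapp.*=fine:*=error"):
        print(spec.modulePattern, spec.level)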